Merge "Add k8s metallb test and sampleservice class"
diff --git a/jobs/pipelines/deploy-cicd-and-test-k8s.groovy b/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
new file mode 100644
index 0000000..baa9853
--- /dev/null
+++ b/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
@@ -0,0 +1,87 @@
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+
+node ("${NODE_NAME}") {
+  try {
+
+    stage("Clean the environment and clone tcp-qa") {
+        shared.prepare_working_dir()
+    }
+
+    stage("Create environment, generate model, bootstrap the salt-cluster") {
+        shared.swarm_bootstrap_salt_cluster_devops()
+    }
+
+    stage("Install core infrastructure and deploy CICD nodes") {
+        shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+    }
+
+    stage("Install core infrastructure and deploy CICD nodes") {
+        shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+    }
+
+    stage("Run tests") {
+        shared.run_cmd("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            . ./tcp_tests/utils/env_k8s
+
+            # Prepare snapshots that may be used in tests if MANAGER=devops
+            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_k8s_deployed.ini
+            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_stacklight_deployed.ini
+            #dos.py suspend ${ENV_NAME}
+            #dos.py snapshot ${ENV_NAME} k8s_deployed
+            #dos.py snapshot ${ENV_NAME} stacklight_deployed
+            #dos.py resume ${ENV_NAME}
+            #dos.py time-sync ${ENV_NAME}
+
+            # Initialize variables used in tcp-qa tests
+            export CURRENT_SNAPSHOT=stacklight_deployed  # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separately
+
+            #export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
+            export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
+            export MAKE_SNAPSHOT_STAGES=false  # do not make/revert snapshots in the 'hardware' fixture
+            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+            export salt_master_port=6969
+            export SALT_USER=\$SALTAPI_USER
+            export SALT_PASSWORD=\$SALTAPI_PASS
+            export CORE_INSTALLED=true  # skip core_deployed fixture
+            export K8S_INSTALLED=true  # skip k8s_deployed fixture
+            export sl_installed=true  # skip stacklight_deployed fixture
+
+            py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+            dos.py suspend ${ENV_NAME}
+            dos.py snapshot ${ENV_NAME} test_completed
+            """)
+    }
+
+  } catch (e) {
+      common.printMsg("Job failed", "red")
+      shared.run_cmd("""\
+          dos.py suspend ${ENV_NAME} || true
+          dos.py snapshot ${ENV_NAME} test_failed || true
+          """)
+      throw e
+  } finally {
+    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+    // and report appropriate data to TestRail
+    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+        shared.run_cmd("""\
+            dos.py destroy ${ENV_NAME} || true
+        """)
+    } else {
+        shared.run_cmd("""\
+            dos.py resume ${ENV_NAME} || true
+            dos.py time-sync ${ENV_NAME} || true
+        """)
+    }
+    shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
+    shared.report_test_result()
+  }
+}
\ No newline at end of file
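
The exports in the "Run tests" stage above are consumed by tcp-qa's settings layer, which reads its configuration from the environment (see the tcp_tests/settings.py changes further down). A minimal Python sketch of that pattern, assuming a boolean helper like the get_var_as_bool visible in settings.py (the truthy spellings accepted here are an assumption):

    import os

    def get_var_as_bool(name, default):
        # Sketch of the boolean env reader used by tcp_tests/settings.py;
        # the exact accepted spellings are an assumption.
        value = os.environ.get(name, str(default))
        return str(value).lower() in ('true', 'yes', '1')

    # Variables exported by the pipeline before invoking py.test:
    CORE_INSTALLED = get_var_as_bool('CORE_INSTALLED', False)    # skip core_deployed
    K8S_INSTALLED = get_var_as_bool('K8S_INSTALLED', False)      # skip k8s_deployed
    MAKE_SNAPSHOT_STAGES = get_var_as_bool('MAKE_SNAPSHOT_STAGES', True)
    TESTS_CONFIGS = os.environ.get('TESTS_CONFIGS', None)        # .ini with SSH data
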
diff --git a/jobs/pipelines/deploy-cicd-and-test-openstack.groovy b/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
new file mode 100644
index 0000000..585ad83
--- /dev/null
+++ b/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
@@ -0,0 +1,89 @@
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+
+node ("${NODE_NAME}") {
+  try {
+
+    stage("Clean the environment and clone tcp-qa") {
+        shared.prepare_working_dir()
+    }
+
+    stage("Create environment, generate model, bootstrap the salt-cluster") {
+        shared.swarm_bootstrap_salt_cluster_devops()
+    }
+
+    stage("Install core infrastructure and deploy CICD nodes") {
+        shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+    }
+
+    stage("Install core infrastructure and deploy CICD nodes") {
+        shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+    }
+
+    stage("Run tests") {
+        shared.run_cmd("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            # TODO: . ./tcp_tests/utils/env_keystonercv3
+
+            # Prepare snapshots that may be used in tests if MANAGER=devops
+            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_openstack_deployed.ini
+            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_stacklight_deployed.ini
+            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_sl_os_deployed.ini
+            #dos.py suspend ${ENV_NAME}
+            #dos.py snapshot ${ENV_NAME} openstack_deployed
+            #dos.py snapshot ${ENV_NAME} stacklight_deployed
+            #dos.py snapshot ${ENV_NAME} sl_os_deployed
+            #dos.py resume ${ENV_NAME}
+            #dos.py time-sync ${ENV_NAME}
+
+            # Initialize variables used in tcp-qa tests
+            export CURRENT_SNAPSHOT=stacklight_deployed  # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separately
+
+            #export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
+            export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
+            export MAKE_SNAPSHOT_STAGES=false  # do not make/revert snapshots in the 'hardware' fixture
+            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+            export salt_master_port=6969
+            export SALT_USER=\$SALTAPI_USER
+            export SALT_PASSWORD=\$SALTAPI_PASS
+            export CORE_INSTALLED=true  # skip core_deployed fixture
+            export OPENSTACK_INSTALLED=true  # skip openstack_deployed fixture
+            export sl_installed=true  # skip stacklight_deployed fixture
+
+            py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+            dos.py suspend ${ENV_NAME}
+            dos.py snapshot ${ENV_NAME} test_completed
+            """)
+    }
+
+  } catch (e) {
+      common.printMsg("Job failed", "red")
+      shared.run_cmd("""\
+          dos.py suspend ${ENV_NAME} || true
+          dos.py snapshot ${ENV_NAME} test_failed || true
+          """)
+      throw e
+  } finally {
+    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+    // and report appropriate data to TestRail
+    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+        shared.run_cmd("""\
+            dos.py destroy ${ENV_NAME} || true
+        """)
+    } else {
+        shared.run_cmd("""\
+            dos.py resume ${ENV_NAME} || true
+            dos.py time-sync ${ENV_NAME} || true
+        """)
+    }
+    shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
+    shared.report_test_result()
+  }
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
new file mode 100644
index 0000000..135f9d5
--- /dev/null
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -0,0 +1,111 @@
+/**
+ *
+ * Create fuel-devops environment, generate a model for it
+ * and bootstrap a salt cluster on the environment nodes
+ *
+ * Expected parameters:
+
+ *   PARENT_NODE_NAME              Name of the Jenkins slave on which to create the environment
+ *   PARENT_WORKSPACE              Path to the parent job workspace that contains the tcp-qa repo
+ *   LAB_CONFIG_NAME               Name of the tcp-qa deployment template
+ *   ENV_NAME                      Fuel-devops environment name
+ *   MCP_VERSION                   MCP version, like 2018.4 or proposed
+ *   MCP_IMAGE_PATH1604            Local path to the image http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ *   IMAGE_PATH_CFG01_DAY01        Local path to the image http://ci.mcp.mirantis.net:8085/images/cfg01-day01-proposed.qcow2
+ *   CFG01_CONFIG_IMAGE_NAME       Name for the config drive image to create, like cfg01.${LAB_CONFIG_NAME}-config-drive.iso
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   PIPELINE_LIBRARY_REF          Reference to the pipeline-library change
+ *   MK_PIPELINES_REF              Reference to the mk-pipelines change
+ *   COOKIECUTTER_TEMPLATE_COMMIT  Commit/tag/branch for the cookiecutter-templates repository. If empty, defaults to ${MCP_VERSION}
+ *   SALT_MODELS_SYSTEM_COMMIT     Commit/tag/branch for the reclass-system repository. If empty, defaults to ${MCP_VERSION}
+ *   SHUTDOWN_ENV_ON_TEARDOWN      Optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+    if (! fileExists("${PARENT_WORKSPACE}")) {
+        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+    }
+    dir("${PARENT_WORKSPACE}") {
+        try {
+            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+                println "Remove environment ${ENV_NAME}"
+                shared.run_cmd("""\
+                    dos.py erase ${ENV_NAME} || true
+                """)
+                println "Remove config drive ISO"
+                shared.run_cmd("""\
+                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+                """)
+            }
+
+            stage("Create an environment ${ENV_NAME} in disabled state") {
+                // deploy_hardware.xml
+                shared.run_cmd("""\
+                    export ENV_NAME=${ENV_NAME}
+                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                    export MANAGER=devops
+                    export PYTHONIOENCODING=UTF-8
+                    export REPOSITORY_SUITE=${MCP_VERSION}
+                    export TEST_GROUP=test_create_environment
+                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+                """)
+            }
+
+            stage("Generate the model") {
+                shared.generate_cookied_model()
+            }
+
+            stage("Generate config drive ISO") {
+                shared.generate_configdrive_iso()
+            }
+
+            stage("Upload generated config drive ISO into volume on cfg01 node") {
+                shared.run_cmd("""\
+                    # Get SALT_MASTER_HOSTNAME to determine the volume name
+                    . ./tcp_tests/utils/env_salt
+                    virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+                    virsh pool-refresh --pool default
+                """)
+            }
+
+            stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+                // deploy_salt.xml
+                shared.run_cmd("""\
+                    export ENV_NAME=${ENV_NAME}
+                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                    export MANAGER=devops
+                    export SHUTDOWN_ENV_ON_TEARDOWN=false
+                    export BOOTSTRAP_TIMEOUT=900
+                    export PYTHONIOENCODING=UTF-8
+                    export REPOSITORY_SUITE=${MCP_VERSION}
+                    export TEST_GROUP=test_bootstrap_salt
+                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_salt.xml -k \${TEST_GROUP}
+                    sleep 60  # wait for Jenkins to start and for IO to calm down
+                """)
+            }
+
+        } catch (e) {
+            common.printMsg("Job failed", "red")
+            throw e
+        } finally {
+            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+            // and report appropriate data to TestRail
+            // TODO(ddmitriev): add checks for salt cluster
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                shared.run_cmd("""\
+                    dos.py destroy ${ENV_NAME}
+                """)
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
new file mode 100644
index 0000000..591467f
--- /dev/null
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -0,0 +1,74 @@
+/**
+ *
+ * Deploy CICD cluster using Jenkins master on cfg01 node
+ *
+ * Expected parameters:
+
+ *   PARENT_NODE_NAME              Name of the Jenkins slave where the environment was created
+ *   PARENT_WORKSPACE              Path to the parent job workspace that contains the tcp-qa repo
+ *   ENV_NAME                      Fuel-devops environment name
+ *   STACK_INSTALL                 Stacks to install using Jenkins on the cfg01 node: "core:1800,cicd:1800", where 1800 is the timeout in seconds
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   SHUTDOWN_ENV_ON_TEARDOWN      Optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+    if (! fileExists("${PARENT_WORKSPACE}")) {
+        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+    }
+    dir("${PARENT_WORKSPACE}") {
+        try {
+
+            if (! env.STACK_INSTALL) {
+                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+            }
+
+            // Install core and cicd
+            def stack
+            def timeout
+
+            for (element in "${env.STACK_INSTALL}".split(",")) {
+                if (element.contains(':')) {
+                    (stack, timeout) = element.split(':')
+                } else {
+                    stack = element
+                    timeout = '1800'
+                }
+                stage("Run Jenkins job on salt-master [deploy_openstack:${stack}]") {
+                    shared.run_job_on_day01_node(stack, timeout)
+                }
+
+                stage("Sanity check the deployed component [${stack}]") {
+                    shared.sanity_check_component(stack)
+                }
+
+                stage("Make environment snapshot [${stack}_deployed]") {
+                    shared.devops_snapshot(stack)
+                }
+            }
+
+        } catch (e) {
+            common.printMsg("Job failed", "red")
+            throw e
+        } finally {
+            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+            // and report appropriate data to TestRail
+            // TODO(ddmitriev): add checks for cicd cluster
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                shared.run_cmd("""\
+                    dos.py destroy ${ENV_NAME}
+                """)
+            }
+        }
+    }
+}
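
The loop above accepts bare stack names as well as stack:timeout pairs, defaulting the timeout to 1800. An equivalent Python sketch for clarity (parse_stack_install is a hypothetical helper name, not part of this change):

    def parse_stack_install(stack_install, default_timeout=1800):
        """Parse 'core:1800,cicd:1800' into [('core', 1800), ('cicd', 1800)]."""
        pairs = []
        for element in stack_install.split(','):
            if ':' in element:
                stack, timeout = element.split(':', 1)
                pairs.append((stack, int(timeout)))
            else:
                # No explicit timeout given for this stack
                pairs.append((element, default_timeout))
        return pairs

    assert parse_stack_install('core,cicd:3600') == [('core', 1800), ('cicd', 3600)]
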
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
new file mode 100644
index 0000000..e144372
--- /dev/null
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -0,0 +1,74 @@
+/**
+ *
+ * Deploy the product cluster using Jenkins master on CICD cluster
+ *
+ * Expected parameters:
+
+ *   PARENT_NODE_NAME              Name of the Jenkins slave where the environment was created
+ *   PARENT_WORKSPACE              Path to the parent job workspace that contains the tcp-qa repo
+ *   ENV_NAME                      Fuel-devops environment name
+ *   STACK_INSTALL                 Stacks to install using Jenkins on the CICD cluster: "openstack:3200,stacklight:2400", where 3200 and 2400 are timeouts in seconds
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   SHUTDOWN_ENV_ON_TEARDOWN      Optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+    if (! fileExists("${PARENT_WORKSPACE}")) {
+        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+    }
+    dir("${PARENT_WORKSPACE}") {
+        try {
+
+            if (! env.STACK_INSTALL) {
+                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+            }
+
+            // Install the cluster
+            def stack
+            def timeout
+
+            for (element in "${STACK_INSTALL}".split(",")) {
+                if (element.contains(':')) {
+                    (stack, timeout) = element.split(':')
+                } else {
+                    stack = element
+                    timeout = '1800'
+                }
+                stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+                    shared.run_job_on_cicd_nodes(stack, timeout)
+                }
+
+                stage("Sanity check the deployed component [${stack}]") {
+                    shared.sanity_check_component(stack)
+                }
+
+                stage("Make environment snapshot [${stack}_deployed]") {
+                    shared.devops_snapshot(stack)
+                }
+            }
+
+        } catch (e) {
+            common.printMsg("Job failed", "red")
+            throw e
+        } finally {
+            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+            // and report appropriate data to TestRail
+            // TODO(ddmitriev): add checks for the installed stacks
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                shared.run_cmd("""\
+                    dos.py destroy ${ENV_NAME}
+                """)
+            }
+        }
+    }
+}
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
new file mode 100644
index 0000000..af2b2dc
--- /dev/null
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -0,0 +1,233 @@
+package com.mirantis.system_qa
+
+
+def run_cmd(cmd, returnStdout=false) {
+    def common = new com.mirantis.mk.Common()
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = """\
+        set +x;
+        echo 'activate python virtualenv ${VENV_PATH}';
+        . ${VENV_PATH}/bin/activate;
+        bash -c 'set -ex; ${cmd.stripIndent()}'
+    """
+    return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+    return run_cmd(cmd, true)
+}
+
+
+def prepare_working_dir() {
+        println "Clean the working directory ${env.WORKSPACE}"
+        deleteDir()
+
+        //// do not fail if the environment doesn't exist
+        // println "Remove environment ${ENV_NAME}"
+        // run_cmd("""\
+        //     dos.py erase ${ENV_NAME} || true
+        // """)
+        // println "Remove config drive ISO"
+        // run_cmd("""\
+        //    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+        // """)
+
+        run_cmd("""\
+        git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
+        if [ -n "$TCP_QA_REFS" ]; then
+            set -e
+            git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+        fi
+        pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
+        """)
+}
+
+def swarm_bootstrap_salt_cluster_devops() {
+        def common = new com.mirantis.mk.Common()
+        def parameters = [
+                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_WORKSPACE', value: pwd()),
+                string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+                string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+                string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
+                string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+                string(name: 'CFG01_CONFIG_IMAGE_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
+                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
+                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${COOKIECUTTER_TEMPLATE_COMMIT}"),
+                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${SALT_MODELS_SYSTEM_COMMIT}"),
+                booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+            ]
+        common.printMsg("Start building job 'swarm-bootstrap-salt-cluster-devops' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'swarm-bootstrap-salt-cluster-devops',
+            parameters: parameters
+}
+
+def swarm_deploy_cicd(String stack_to_install='core,cicd') {
+        // Run the 'deploy_openstack' job on the cfg01 Jenkins for the specified stacks
+        def common = new com.mirantis.mk.Common()
+        def parameters = [
+                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_WORKSPACE', value: pwd()),
+                string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+                string(name: 'STACK_INSTALL', value: stack_to_install),
+                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+            ]
+        common.printMsg("Start building job 'swarm-deploy-cicd' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'swarm-deploy-cicd',
+            parameters: parameters
+}
+
+def swarm_deploy_platform(String stack_to_install) {
+        // Run the 'deploy_openstack' job on the CICD Jenkins for the specified stacks
+        def common = new com.mirantis.mk.Common()
+        def parameters = [
+                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_WORKSPACE', value: pwd()),
+                string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+                string(name: 'STACK_INSTALL', value: stack_to_install),
+                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+            ]
+        common.printMsg("Start building job 'swarm-deploy-platform' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'swarm-deploy-platform',
+            parameters: parameters
+}
+
+def generate_cookied_model() {
+        def common = new com.mirantis.mk.Common()
+        // do not fail if the environment doesn't exist
+        def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+        def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+        def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+        def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+        println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
+        println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
+        println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
+        println("IPV4_NET_EXTERNAL=" + IPV4_NET_EXTERNAL)
+
+        def cookiecuttertemplate_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
+        def saltmodels_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: env.MCP_VERSION
+
+        def parameters = [
+                string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
+                string(name: 'REPOSITORY_SUITE', value: "${env.MCP_VERSION}"),
+                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${saltmodels_system_commit}"),
+                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecuttertemplate_commit}"),
+                string(name: 'TCP_QA_REVIEW', value: "${TCP_QA_REFS}"),
+                string(name: 'IPV4_NET_ADMIN', value: IPV4_NET_ADMIN),
+                string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
+                string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
+                string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
+            ]
+        common.printMsg("Start building job 'swarm-cookied-model-generator' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'swarm-cookied-model-generator',
+            parameters: parameters
+}
+
+def generate_configdrive_iso() {
+        def common = new com.mirantis.mk.Common()
+        def SALT_MASTER_IP=run_cmd_stdout("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            echo \$SALT_MASTER_IP
+            """).trim().split().last()
+        println("SALT_MASTER_IP=" + SALT_MASTER_IP)
+        def parameters = [
+                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
+                string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
+                booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
+                string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+                string(name: 'COMMON_SCRIPTS_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+                string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
+                string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
+                booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
+                string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
+                string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
+                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
+            ]
+        common.printMsg("Start building job 'create-cfg-config-drive' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'create-cfg-config-drive',
+            parameters: parameters
+}
+
+def run_job_on_day01_node(stack_to_install, timeout=1800) {
+    // stack_to_install="core,cicd"
+    def stack = "${stack_to_install}"
+    run_cmd("""\
+        export ENV_NAME=${ENV_NAME}
+        . ./tcp_tests/utils/env_salt
+        . ./tcp_tests/utils/env_jenkins_day01
+        export JENKINS_BUILD_TIMEOUT=${timeout}
+        JOB_PARAMETERS=\"{
+            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+        }\"
+        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+    """)
+}
+
+def run_job_on_cicd_nodes(stack_to_install, timeout=1800) {
+    // stack_to_install="k8s,calico,stacklight"
+    def stack = "${stack_to_install}"
+    run_cmd("""\
+        export ENV_NAME=${ENV_NAME}
+        . ./tcp_tests/utils/env_salt
+        . ./tcp_tests/utils/env_jenkins_cicd
+        export JENKINS_BUILD_TIMEOUT=${timeout}
+        JOB_PARAMETERS=\"{
+            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+        }\"
+        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+        sleep 60  # Wait for IO to calm down on cluster nodes
+    """)
+}
+
+def sanity_check_component(stack) {
+    // Run sanity check for the component ${stack}.
+    // Result will be stored in JUnit XML file deploy_${stack}.xml
+    run_cmd("""\
+        py.test --junit-xml=deploy_${stack}.xml -m check_${stack}
+    """)
+}
+
+def devops_snapshot(stack) {
+    // Make the snapshot with name "${stack}_deployed"
+    // for all VMs in the environment.
+    // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
+    // then make a copy for the created snapshot to allow the system
+    // tests to revert this snapshot along with the metadata from the INI file.
+    run_cmd("""\
+        dos.py suspend ${ENV_NAME}
+        dos.py snapshot ${ENV_NAME} ${stack}_deployed
+        dos.py resume ${ENV_NAME}
+        dos.py time-sync ${ENV_NAME}
+        if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
+            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
+        fi
+    """)
+}
+
+def report_deploy_result(deploy_expected_stacks) {
+}
+
+def report_test_result() {
+}
\ No newline at end of file
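
generate_cookied_model() above derives each IPV4_NET_* value by grepping one pool name out of `dos.py net-list` output and taking the last whitespace-separated field. A Python sketch of the same extraction, assuming net-list prints one 'pool-name  CIDR' pair per line (the sample output below is illustrative):

    def pool_cidr(net_list_output, pool_name):
        # Mirrors: dos.py net-list | grep <pool_name>, then .trim().split().last()
        for line in net_list_output.splitlines():
            if pool_name in line:
                return line.split()[-1]
        raise ValueError('pool {!r} not found'.format(pool_name))

    example = 'admin-pool01    10.70.0.0/24\nprivate-pool01  10.70.1.0/24\n'
    assert pool_cidr(example, 'admin-pool01') == '10.70.0.0/24'
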
diff --git a/tcp_tests/fixtures/ceph_fixtures.py b/tcp_tests/fixtures/ceph_fixtures.py
index 94315c4..44ab7d2 100644
--- a/tcp_tests/fixtures/ceph_fixtures.py
+++ b/tcp_tests/fixtures/ceph_fixtures.py
@@ -38,7 +38,7 @@
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.ceph_deployed)
 @pytest.fixture(scope='function')
 def ceph_deployed(revert_snapshot, request, config,
-                  hardware, underlay, common_services_deployed,
+                  hardware, underlay, core_deployed,
                   salt_deployed, ceph_actions):
     """Fixture to get or install Ceph services on environment
 
@@ -48,7 +48,7 @@
     :param config: fixture provides oslo.config
     :param hardware: fixture provides enviromnet manager
     :param underlay: fixture provides underlay manager
-    :param common_services_deployed: fixture provides CommonServicesManager
+    :param core_deployed: fixture provides CoreManager
     :param ceph_actions: fixture provides CephManager instance
     :rtype: CephManager
 
diff --git a/tcp_tests/fixtures/common_services_fixtures.py b/tcp_tests/fixtures/common_services_fixtures.py
deleted file mode 100644
index 7d1c73f..0000000
--- a/tcp_tests/fixtures/common_services_fixtures.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#    Copyright 2016 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import pytest
-
-from tcp_tests import logger
-from tcp_tests.helpers import ext
-from tcp_tests.managers import common_services_manager
-
-LOG = logger.logger
-
-
-@pytest.fixture(scope='function')
-def common_services_actions(config, underlay, salt_actions):
-    """Fixture that provides various actions for CommonServices
-
-    :param config: fixture provides oslo.config
-    :param underlay: fixture provides underlay manager
-    :rtype: CommonServicesManager
-    """
-    return common_services_manager.CommonServicesManager(config, underlay,
-                                                         salt_actions)
-
-
-@pytest.mark.revert_snapshot(ext.SNAPSHOT.common_services_deployed)
-@pytest.fixture(scope='function')
-def common_services_deployed(revert_snapshot, request, config,
-                             hardware, underlay, salt_deployed,
-                             common_services_actions):
-    """Fixture to get or install common services on the environment
-
-    :param revert_snapshot: fixture that reverts snapshot that is specified
-                            in test with @pytest.mark.revert_snapshot(<name>)
-    :param request: fixture provides pytest data
-    :param config: fixture provides oslo.config
-    :param hardware: fixture provides enviromnet manager
-    :param underlay: fixture provides underlay manager
-    :param common_services_actions: fixture provides CommonServicesManager
-                                    instance
-    :rtype: CommonServicesManager
-
-    If config.common_services.common_services_installed is not set, this
-    fixture assumes that the common services were not installed
-    , and do the following:
-    - install common services
-    - make snapshot with name 'common_services_deployed'
-    - return CommonServicesManager
-
-    If config.common_services.common_services_installed was set, this fixture
-    assumes that the common services were already installed, and do
-    the following:
-    - return CommonServicesManager instance
-
-    If you want to revert 'common_services_deployed' snapshot, please use mark:
-    @pytest.mark.revert_snapshot("common_services_deployed")
-    """
-    # Create Salt cluster
-    if not config.common_services.common_services_installed:
-        steps_path = config.common_services_deploy.common_services_steps_path
-        commands = underlay.read_template(steps_path)
-        common_services_actions.install(commands)
-        hardware.create_snapshot(ext.SNAPSHOT.common_services_deployed)
-        salt_deployed.sync_time()
-
-    else:
-        # 1. hardware environment created and powered on
-        # 2. config.underlay.ssh contains SSH access to provisioned nodes
-        #    (can be passed from external config with TESTS_CONFIGS variable)
-        # 3. config.tcp.* options contain access credentials to the already
-        #    installed TCP API endpoint
-        pass
-
-    return common_services_actions
diff --git a/tcp_tests/fixtures/core_fixtures.py b/tcp_tests/fixtures/core_fixtures.py
new file mode 100644
index 0000000..cb058a3
--- /dev/null
+++ b/tcp_tests/fixtures/core_fixtures.py
@@ -0,0 +1,83 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+from tcp_tests.managers import core_manager
+
+LOG = logger.logger
+
+
+@pytest.fixture(scope='function')
+def core_actions(config, underlay, salt_actions):
+    """Fixture that provides various actions for Core
+
+    :param config: fixture provides oslo.config
+    :param underlay: fixture provides underlay manager
+    :rtype: CoreManager
+    """
+    return core_manager.CoreManager(config, underlay, salt_actions)
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.core_deployed)
+@pytest.fixture(scope='function')
+def core_deployed(revert_snapshot, request, config,
+                  hardware, underlay, salt_deployed,
+                  core_actions):
+    """Fixture to get or install common services on the environment
+
+    :param revert_snapshot: fixture that reverts snapshot that is specified
+                            in test with @pytest.mark.revert_snapshot(<name>)
+    :param request: fixture provides pytest data
+    :param config: fixture provides oslo.config
+    :param hardware: fixture provides environment manager
+    :param underlay: fixture provides underlay manager
+    :param core_actions: fixture provides CoreManager instance
+    :rtype: CoreManager
+
+    If config.core.core_installed is not set, this fixture assumes
+    that the core services were not installed, and does the following:
+    - install the core services
+    - make a snapshot with the name 'core_deployed'
+    - return CoreManager
+
+    If config.core.core_installed was set, this fixture assumes
+    that the core services were already installed, and does the following:
+    - return CoreManager instance
+
+    If you want to revert the 'core_deployed' snapshot, please use the mark:
+    @pytest.mark.revert_snapshot("core_deployed")
+    """
+    # Install the core services
+    if not config.core.core_installed:
+        steps_path = config.core_deploy.core_steps_path
+        commands = underlay.read_template(steps_path)
+        core_actions.install(commands)
+        hardware.create_snapshot(ext.SNAPSHOT.core_deployed)
+        salt_deployed.sync_time()
+
+    else:
+        # 1. hardware environment created and powered on
+        # 2. config.underlay.ssh contains SSH access to provisioned nodes
+        #    (can be passed from external config with TESTS_CONFIGS variable)
+        # 3. config.tcp.* options contain access credentials to the already
+        #    installed TCP API endpoint
+        pass
+
+    return core_actions
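
A hypothetical test consuming the new fixture; the snapshot name comes from ext.SNAPSHOT.core_deployed, and get_keepalived_vip_minion_id() is the CoreManager method kept by the core_manager.py rename below (the VIP address here is only an example):

    import pytest

    from tcp_tests.helpers import ext

    @pytest.mark.revert_snapshot(ext.SNAPSHOT.core_deployed)
    def test_keepalived_vip_present(core_deployed):
        # core_deployed returns the CoreManager instance
        minion_id = core_deployed.get_keepalived_vip_minion_id('10.70.1.10')  # example VIP
        assert minion_id
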
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index f59ff61..a85473d 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -40,7 +40,7 @@
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)
 @pytest.fixture(scope='function')
 def k8s_deployed(revert_snapshot, request, config, hardware, underlay,
-                 common_services_deployed, salt_deployed, k8s_actions):
+                 core_deployed, salt_deployed, k8s_actions):
     """Fixture to get or install k8s on environment
 
     :param revert_snapshot: fixture that reverts snapshot that is specified
@@ -49,7 +49,7 @@
     :param config: fixture provides oslo.config
     :param hardware: fixture provides enviromnet manager
     :param underlay: fixture provides underlay manager
-    :param common_services_deployed: fixture provides CommonServicesManager
+    :param core_deployed: fixture provides CoreManager
     :param k8s_actions: fixture provides K8SManager instance
     :rtype: K8SManager
 
diff --git a/tcp_tests/fixtures/openstack_fixtures.py b/tcp_tests/fixtures/openstack_fixtures.py
index 8f997a3..480a548 100644
--- a/tcp_tests/fixtures/openstack_fixtures.py
+++ b/tcp_tests/fixtures/openstack_fixtures.py
@@ -39,7 +39,7 @@
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
 @pytest.fixture(scope='function')
 def openstack_deployed(revert_snapshot, request, config,
-                       hardware, underlay, common_services_deployed,
+                       hardware, underlay, core_deployed,
                        salt_deployed, openstack_actions, rally):
     """Fixture to get or install OpenStack services on environment
 
@@ -49,7 +49,7 @@
     :param config: fixture provides oslo.config
     :param hardware: fixture provides enviromnet manager
     :param underlay: fixture provides underlay manager
-    :param common_services_deployed: fixture provides CommonServicesManager
+    :param core_deployed: fixture provides CoreManager
     :param openstack_actions: fixture provides OpenstackManager instance
     :param rally: fixture provides RallyManager instance
     :rtype: OpenstackManager
diff --git a/tcp_tests/fixtures/oss_fixtures.py b/tcp_tests/fixtures/oss_fixtures.py
index 95bbc54..6162e27 100644
--- a/tcp_tests/fixtures/oss_fixtures.py
+++ b/tcp_tests/fixtures/oss_fixtures.py
@@ -88,8 +88,8 @@
                        oss_deployed):
     """Fixture to get or install SL and OpenStack services on environment
 
-    Uses fixtures openstack_deployed and sl_deployed, with 'sl_deployed'
-    top-level snapshot.
+    Uses fixtures openstack_deployed and stacklight_deployed,
+    with 'stacklight_deployed' top-level snapshot.
 
     Returns SLManager instance object
     """
diff --git a/tcp_tests/fixtures/stacklight_fixtures.py b/tcp_tests/fixtures/stacklight_fixtures.py
index c1747b8..2c30530 100644
--- a/tcp_tests/fixtures/stacklight_fixtures.py
+++ b/tcp_tests/fixtures/stacklight_fixtures.py
@@ -35,11 +35,11 @@
     return sl_manager.SLManager(config, underlay, salt_deployed)
 
 
-@pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
 @pytest.fixture(scope='function')
-def sl_deployed(revert_snapshot, request, config,
-                hardware, underlay, common_services_deployed,
-                salt_deployed, sl_actions):
+def stacklight_deployed(revert_snapshot, request, config,
+                        hardware, underlay, core_deployed,
+                        salt_deployed, sl_actions):
     """Fixture to get or install SL services on environment
 
     :param revert_snapshot: fixture that reverts snapshot that is specified
@@ -56,7 +56,7 @@
         steps_path = config.sl_deploy.sl_steps_path
         commands = underlay.read_template(steps_path)
         sl_actions.install(commands)
-        hardware.create_snapshot(ext.SNAPSHOT.sl_deployed)
+        hardware.create_snapshot(ext.SNAPSHOT.stacklight_deployed)
         salt_deployed.sync_time()
 
     else:
@@ -70,16 +70,16 @@
     return sl_actions
 
 
-@pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
 @pytest.fixture(scope='function')
 def sl_os_deployed(revert_snapshot,
                    openstack_deployed,
-                   sl_deployed):
+                   stacklight_deployed):
     """Fixture to get or install SL and OpenStack services on environment
 
-    Uses fixtures openstack_deployed and sl_deployed, with 'sl_deployed'
-    top-level snapshot.
+    Uses fixtures openstack_deployed and stacklight_deployed,
+    with 'stacklight_deployed' top-level snapshot.
 
     Returns SLManager instance object
     """
-    return sl_deployed
+    return stacklight_deployed
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 9b4bed0..182e3f9 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -150,10 +150,25 @@
     request.addfinalizer(test_fin)
 
 
+@pytest.fixture(scope="function")
+def underlay_actions(config):
+    """Fixture that provides SSH access to underlay objects.
+
+    :param config: oslo_config object that keeps various parameters
+                   across the fixtures, tests and test runs.
+                   All SSH data is taken from the provided config.
+    :rtype UnderlaySSHManager: Object that encapsulates SSH credentials;
+                               - provides the list of underlay nodes;
+                               - provides SSH access to underlay nodes using
+                                 node names or node IPs.
+    """
+    return underlay_ssh_manager.UnderlaySSHManager(config)
+
+
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.underlay)
 @pytest.fixture(scope="function")
 def underlay(request, revert_snapshot, config, hardware):
-    """Fixture that should provide SSH access to underlay objects.
+    """Fixture that bootstraps the environment underlay.
 
     - Starts the 'hardware' environment and creates 'underlay' with required
       configuration.
@@ -182,7 +197,8 @@
         config.underlay.ssh = hardware.get_ssh_data(
             roles=config.underlay.roles)
 
-        underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+        LOG.info("Config - {}".format(config))
+        underlay = underlay_actions(config)
 
         if not config.underlay.lvm:
             underlay.enable_lvm(hardware.lvm_storages())
@@ -200,7 +216,7 @@
         config.underlay.ssh = hardware.get_ssh_data(
             roles=config.underlay.roles)
 
-        underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+        underlay = underlay_actions(config)
 
         LOG.info("Generate MACs for MaaS")
         macs = {
@@ -241,7 +257,7 @@
         # 1. hardware environment created and powered on
         # 2. config.underlay.ssh contains SSH access to provisioned nodes
         #    (can be passed from external config with TESTS_CONFIGS variable)
-        underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+        underlay = underlay_actions(config)
 
     return underlay
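
The new underlay_actions fixture builds an UnderlaySSHManager straight from config, without reverting snapshots, so tests that only need SSH can avoid the full underlay bootstrap. A hypothetical usage relying on check_call(), which this change also exercises in rallymanager.py ('cfg01' is an example node name):

    def test_ssh_to_salt_master(underlay_actions):
        # Node names come from config.underlay.ssh; raises on a non-zero exit code
        underlay_actions.check_call('uptime', node_name='cfg01')
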
 
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 800367d..19bdf08 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -43,11 +43,11 @@
     'hardware',
     'underlay',
     'salt_deployed',
-    'common_services_deployed',
+    'core_deployed',
     'oss_deployed',
     'drivetrain_deployed',
     'openstack_deployed',
-    'sl_deployed',
+    'stacklight_deployed',
     'virtlet_deployed',
     'virtlet_ceph_deployed',
     'k8s_deployed',
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/core_manager.py
similarity index 97%
rename from tcp_tests/managers/common_services_manager.py
rename to tcp_tests/managers/core_manager.py
index 4e1e34a..50767aa 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/core_manager.py
@@ -19,8 +19,8 @@
 LOG = logger.logger
 
 
-class CommonServicesManager(ExecuteCommandsMixin):
-    """docstring for CommonServicesManager"""
+class CoreManager(ExecuteCommandsMixin):
+    """docstring for CoreManager"""
 
     __config = None
     __underlay = None
@@ -29,13 +29,13 @@
         self.__config = config
         self.__underlay = underlay
         self._salt = salt
-        super(CommonServicesManager, self).__init__(
+        super(CoreManager, self).__init__(
             config=config, underlay=underlay)
 
     def install(self, commands):
         self.execute_commands(commands,
                               label='Install common services')
-        self.__config.common_services.common_services_installed = True
+        self.__config.core.core_installed = True
 
     def get_keepalived_vip_minion_id(self, vip):
         """Get minion ID where keepalived VIP is at the moment"""
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 409df4f..ff81501 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -622,7 +622,8 @@
         """
         with self.__underlay.remote(node_name=self.ctl_host) as r:
             cmd = ("apt-get install python-setuptools -y; "
-                   "pip install git+https://github.com/mogaika/xunitmerge.git")
+                   "pip install "
+                   "https://github.com/mogaika/xunitmerge/archive/master.zip")
             LOG.debug('Installing xunitmerge')
             r.check_call(cmd, raise_on_err=False)
             LOG.debug('Merging xunit')
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index d47aceb..b0232ca 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -104,7 +104,7 @@
         else:
             target_name = node_name
 
-        cmd = ("apt-get -y install docker.io")
+        cmd = ("apt-get -y install docker-ce")
         with self.__underlay.remote(node_name=target_name) as node_remote:
             result = node_remote.execute(cmd, verbose=True)
 
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index 36174db..efaf4bf 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -91,7 +91,7 @@
         image = self.image_name
         LOG.info("Pull {image}:{version}".format(image=image,
                                                  version=version))
-        cmd = ("apt-get -y install docker.io &&"
+        cmd = ("apt-get -y install docker-ce &&"
                " docker pull {image}:{version}".format(image=image,
                                                        version=version))
         self._underlay.check_call(cmd, node_name=self._node_name)
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 70d573b..778691f 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -26,7 +26,8 @@
 TEMPEST_CFG_DIR = '/tmp/test'
 
 CONFIG = {
-    'classes': ['service.runtest.tempest'],
+    'classes': ['service.runtest.tempest',
+                'service.runtest.tempest.services.manila.glance'],
     'parameters': {
         '_param': {
             'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
@@ -167,7 +168,6 @@
                               class_name=self.class_name),
                 node_name=master_name) as editor:
             editor.content = config
-
         with self.underlay.yaml_editor(
                 file_path="/srv/salt/reclass/nodes/_generated/"
                           "cfg01.{domain_name}.yml".format(
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 34103a1..fcbe1a9 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -34,7 +34,8 @@
 SHUTDOWN_ENV_ON_TEARDOWN = get_var_as_bool('SHUTDOWN_ENV_ON_TEARDOWN', True)
 
 LAB_CONFIG_NAME = os.environ.get('LAB_CONFIG_NAME', 'mk22-lab-basic')
-DOMAIN_NAME = os.environ.get('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local'
+DOMAIN_NAME = os.environ.get('DOMAIN_NAME',
+                             '{}.local'.format(LAB_CONFIG_NAME))
 # LAB_CONFIGS_NAME = os.environ.get('LAB_NAME', 'mk22-lab-advanced')
 
 SSH_LOGIN = os.environ.get('SSH_LOGIN', 'root')
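
This DOMAIN_NAME rewrite fixes a subtle bug: the old expression appended '.local' even when DOMAIN_NAME was set explicitly in the environment. A minimal before/after illustration:

    import os

    os.environ['DOMAIN_NAME'] = 'lab.example.net'
    LAB_CONFIG_NAME = 'mk22-lab-basic'

    old = os.environ.get('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local'
    new = os.environ.get('DOMAIN_NAME', '{}.local'.format(LAB_CONFIG_NAME))

    assert old == 'lab.example.net.local'  # unwanted suffix on an explicit value
    assert new == 'lab.example.net'        # explicit value respected; the default still gets '.local'
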
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 541ca34..8da0fae 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -34,9 +34,9 @@
     __name__, 'environment/')
 _default_templates_dir = pkg_resources.resource_filename(
     __name__, 'templates/')
-_default_common_services_steps = pkg_resources.resource_filename(
+_default_core_steps = pkg_resources.resource_filename(
     __name__,
-    'templates/{0}/common-services.yaml'.format(
+    'templates/{0}/core.yaml'.format(
         settings.LAB_CONFIG_NAME))
 _default_oss_steps = pkg_resources.resource_filename(
     __name__,
@@ -155,14 +155,14 @@
            help="", default='6969'),
 ]
 
-common_services_deploy_opts = [
-    ct.Cfg('common_services_steps_path', ct.String(),
+core_deploy_opts = [
+    ct.Cfg('core_steps_path', ct.String(),
            help="Path to YAML with steps to deploy common services",
-           default=_default_common_services_steps),
+           default=_default_core_steps),
 ]
 
-common_services_opts = [
-    ct.Cfg('common_services_installed', ct.Boolean(),
+core_opts = [
+    ct.Cfg('core_installed', ct.Boolean(),
            help="", default=False),
 ]
 
@@ -405,8 +405,8 @@
     ('underlay', underlay_opts),
     ('salt_deploy', salt_deploy_opts),
     ('salt', salt_opts),
-    ('common_services_deploy', common_services_deploy_opts),
-    ('common_services', common_services_opts),
+    ('core_deploy', core_deploy_opts),
+    ('core', core_opts),
     ('oss_deploy', oss_deploy_opts),
     ('oss', oss_opts),
     ('drivetrain_deploy', drivetrain_deploy_opts),
@@ -444,16 +444,16 @@
                           title="salt config and credentials", help=""))
     config.register_opts(group='salt', opts=salt_opts)
 
-    config.register_group(cfg.OptGroup(name='common_services',
+    config.register_group(cfg.OptGroup(name='core',
                           title="Common services for Openstack", help=""))
-    config.register_opts(group='common_services', opts=common_services_opts)
+    config.register_opts(group='core', opts=core_opts)
 
     config.register_group(
-        cfg.OptGroup(name='common_services_deploy',
+        cfg.OptGroup(name='core_deploy',
                      title="Common services for Openstack deploy config",
                      help=""))
-    config.register_opts(group='common_services_deploy',
-                         opts=common_services_deploy_opts)
+    config.register_opts(group='core_deploy',
+                         opts=core_deploy_opts)
 
     config.register_group(cfg.OptGroup(name='oss',
                           title="Operational Support System Tools", help=""))
diff --git a/tcp_tests/templates/SharedPipeline.groovy b/tcp_tests/templates/SharedPipeline.groovy
deleted file mode 100644
index a34be31..0000000
--- a/tcp_tests/templates/SharedPipeline.groovy
+++ /dev/null
@@ -1,114 +0,0 @@
-common = new com.mirantis.mk.Common()
-
-def run_cmd(cmd, returnStdout=false) {
-    common.printMsg("Run shell command:\n" + cmd, "blue")
-    def VENV_PATH='/home/jenkins/fuel-devops30'
-    script = """\
-        set +x;
-        echo 'activate python virtualenv ${VENV_PATH}';
-        . ${VENV_PATH}/bin/activate;
-        bash -c 'set -ex; set -ex; ${cmd.stripIndent()}'
-    """
-    return sh(script: script, returnStdout: returnStdout)
-}
-
-def run_cmd_stdout(cmd) {
-    return run_cmd(cmd, true)
-}
-
-def generate_cookied_model() {
-        // do not fail if environment doesn't exists
-        def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
-        def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
-        def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
-        def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
-        println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
-        println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
-        println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
-        println("IPV4_NET_EXTERNAL=" + IPV4_NET_EXTERNAL)
-
-        def parameters = [
-                string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
-                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
-                string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
-                string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
-                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${MCP_VERSION}"),
-                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${MCP_VERSION}"),
-                string(name: 'TCP_QA_REVIEW', value: "${TCP_QA_REFS}"),
-                string(name: 'IPV4_NET_ADMIN', value: IPV4_NET_ADMIN),
-                string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
-                string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
-                string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
-            ]
-        common.printMsg("Start building job 'swarm-cookied-model-generator' with parameters:", "purple")
-        common.prettyPrint(parameters)
-        build job: 'swarm-cookied-model-generator',
-            parameters: parameters
-}
-
-def generate_configdrive_iso() {
-        def SALT_MASTER_IP=run_cmd_stdout("""\
-            export ENV_NAME=${ENV_NAME}
-            . ./tcp_tests/utils/env_salt
-            echo \$SALT_MASTER_IP
-            """).trim().split().last()
-        println("SALT_MASTER_IP=" + SALT_MASTER_IP)
-        def parameters = [
-                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
-                string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
-                string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
-                booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
-                string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
-                string(name: 'COMMON_SCRIPTS_COMMIT', value: "${MCP_VERSION}"),
-                string(name: 'NODE_NAME', value: "${NODE_NAME}"),
-                string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
-                string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
-                string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
-                booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
-                string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
-                string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
-                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
-                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
-            ]
-        common.printMsg("Start building job 'create-cfg-config-drive' with parameters:", "purple")
-        common.prettyPrint(parameters)
-        build job: 'create-cfg-config-drive',
-            parameters: parameters
-}
-
-def run_job_on_day01_node(stack_to_install) {
-    // stack_to_install="core,cicd"
-    def stack = "${stack_to_install}"
-    run_cmd("""\
-        export ENV_NAME=${ENV_NAME}
-        . ./tcp_tests/utils/env_salt
-        . ./tcp_tests/utils/env_jenkins_day01
-        JOB_PARAMETERS=\"{
-            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
-            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
-        }\"
-        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
-        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
-    """)
-}
-
-def run_job_on_cicd_nodes(stack_to_install) {
-    // stack_to_install="k8s,calico,stacklight"
-    def stack = "${stack_to_install}"
-    run_cmd("""\
-        export ENV_NAME=${ENV_NAME}
-        . ./tcp_tests/utils/env_salt
-        . ./tcp_tests/utils/env_jenkins_cicd
-        JOB_PARAMETERS=\"{
-            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
-            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
-        }\"
-        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
-        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
-        sleep 60  # Wait for IO calm down on cluster nodes
-    """)
-}
-
-
-// pretend a groovy class, DO NOT REMOVE
-return this
\ No newline at end of file
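
Note: with this file deleted, for reference a minimal Python sketch of the run_jenkins_job.py call that run_job_on_day01_node() assembled with hand-escaped JSON. All flags come from the deleted helper; building the parameters with json.dumps avoids the triple-escaped quotes, and SALTAPI_URL is assumed to already be exported by ./tcp_tests/utils/env_salt, as in the helper above.

    import json
    import os
    import subprocess

    job_parameters = json.dumps({
        "SALT_MASTER_URL": os.environ["SALTAPI_URL"],
        "STACK_INSTALL": "core,cicd",
    })

    subprocess.check_call([
        "python", "./tcp_tests/utils/run_jenkins_job.py", "--verbose",
        "--job-name=deploy_openstack",
        "--job-parameters=" + job_parameters,
        "--job-output-prefix=[ {job_name}/{build_number}:core,cicd {time} ] ",
    ])
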
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml
rename to tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/common-services.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-contrail-nfv-maas/common-services.yaml
rename to tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
diff --git a/tcp_tests/templates/cookied-bm-contrail40/common-services.yaml b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-contrail40/common-services.yaml
rename to tcp_tests/templates/cookied-bm-contrail40/core.yaml
diff --git a/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
index c4f342f..5cb47fb 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
@@ -55,8 +55,6 @@
         enp5s0f0:
           role: bond0_ab_contrail
           tenant_address: 192.168.0.101
-          dpdk_pci: "'0000:05:00.0'"
-          dpdk_mac: '90:e2:ba:19:c2:18'
         enp5s0f1:
           role: single_vlan_ctl
           single_address: 10.167.8.101
@@ -74,37 +72,6 @@
         enp5s0f0:
           role: bond0_ab_contrail
           tenant_address: 192.168.0.102
-          dpdk_pci: "'0000:05:00.0'"
-          dpdk_mac: '00:1b:21:87:21:98'
         enp5s0f1:
           role: single_vlan_ctl
           single_address: 10.167.8.102
-
-    # Physical nodes for manual testing replacing resources
-    #    kvm04.cookied-bm-contrail40.local:
-    #      reclass_storage_name: infra_kvm_node04
-    #      roles:
-    #      - infra_kvm
-    #      - linux_system_codename_xenial
-    #      interfaces:
-    #        enp2s0f0:
-    #          role: single_mgm
-    #        enp2s0f1:
-    #          role: bond0_ab_ovs_vlan_ctl
-
-          #    cmp003.cookied-bm-contrail40.local:
-          #      reclass_storage_name: openstack_compute_node03
-          #      roles:
-          #      - openstack_compute
-          #      - features_lvm_backend
-          #      - linux_system_codename_xenial
-          #      interfaces:
-          #        enp2s0f0:
-          #          role: single_mgm
-          #          deploy_address: 172.16.49.122
-          #        enp2s0f1:
-          #          role: bond0_ab_ctl_contrail
-          #          tenant_address: 192.168.0.103
-          #          single_address: 10.167.8.103
-          #          dpdk_pci: "'0000:05:00.0'"
-          #          dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index cc5b43d..1b97dc7 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -1,9 +1,6 @@
 {% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
@@ -43,15 +40,15 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-  #- description: "WR for changing image to proposed"
-  #  cmd: |
-  #    set -e;
+- description: "WR for changing image to proposed"
+  cmd: |
+    set -e;
     # Add message_queue host for opencontrail
-    #    . /root/venv-reclass-tools/bin/activate;
-    #    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    #  node_name: {{ HOSTNAME_CFG01 }}
-    #  retry: {count: 1, delay: 10}
-    #  skip_fail: false
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
 
 - description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
@@ -127,7 +124,7 @@
 
 - description: Hack resolv.conf on VCP nodes for internal services access
   cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -152,6 +149,10 @@
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
 - description: "Lab04 workaround: Give each node root acces with key from cfg01"
   cmd: |
     set -e;
diff --git a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
index 3e07967..87651f8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
@@ -107,11 +107,19 @@
   cmd: |
     if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
       salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus.collector
     fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
-  skip_fail: true
+  skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
 
 - description: Install elasticsearch server
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
@@ -255,4 +263,4 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 15}
-  skip_fail: true
+  skip_fail: false
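
Note: both steps above use the same "apply the state only where the pillar matches" guard, now with skip_fail: false so a failing state run aborts the deployment instead of being ignored. A minimal sketch of that guard pattern, with targets and state names taken from the hunk (the Python helper itself is illustrative only):

    import subprocess

    SALT = ["salt", "--hard-crash", "--state-output=mixed", "--state-verbose=False"]

    def apply_state_if_pillar(pillar, state):
        # Probe first: apply the state only where the pillar actually exists,
        # mirroring the shell "if salt ... match.pillar ..." guard above.
        target = ["-C", "I@" + pillar]
        if subprocess.call(SALT + target + ["match.pillar", pillar]) == 0:
            # skip_fail: false -- a non-zero exit now fails the run
            subprocess.check_call(SALT + target + ["state.sls", state])

    apply_state_if_pillar("prometheus:exporters", "prometheus")
    apply_state_if_pillar("prometheus:collector", "prometheus.collector")
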
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
similarity index 86%
copy from tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
copy to tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
index 06df7ad..530a4e7 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
@@ -1,4 +1,19 @@
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
 
 - description: Install glusterfs
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -21,21 +36,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
 - description: Install RabbitMQ on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
@@ -66,7 +66,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
+    -C 'I@galera:slave' state.sls galera
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -93,13 +93,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Restart rsyslog
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@haproxy:proxy' service.restart rsyslog
@@ -114,10 +107,10 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Check the OpenStack control VIP
+- description: Check the VIP
   cmd: |
-    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
new file mode 100644
index 0000000..72c8bf4
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -0,0 +1,200 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEogIBAAKCAQEAvDqzt/PHWvSSJrBtvD3UWp21CDrAajgOPeXrVm7VU+sDDpw0
+    YqDvVhYfT/N6ha+SWOK00KyTuMMbB8/I4tvsP9vvCXy7v2AJID/ZO2z/t8KfTDEJ
+    C75/6a0UBg6sl3i7+cUOHbeK+lkcfdnSI1H8Jzdhk4Xj7t7q+MIKTs5n+AlScgyz
+    NSiD2nG/U5LmaQ+rjZ1VsF9J0YTds0yLDF3hztVoDTs7j5obl7Xab3ZlwalgH5Gc
+    Id6BI09jkUbppdPzHpzV2oad7cFpVYTt9m3/MMT0amzPuwl/u/dI64rRuWPe60eT
+    qeVMQD0zP6o9F79upbzQStt82lPJcfF4CXvxYwIDAQABAoIBAAHUXDzUfMKQj/8a
+    RebHfxHmaIUM9SPTKahGXNQ5PY+UQDJbKFMxF0Jx8pn3VuCHxVdh1LoWg1UPaGra
+    BSzXUGOKgrdH5BdHGq+aj0T5mT6zAJNgAqN/lYSy7vfkGp9aSBF0bd+yEgK+7Pz4
+    Kge320iSTDt/2KhQuF30nN8JOI97m2uk2YHH8TixfVtmgLPEy+0Mw4VZLsHD4OY1
+    zu8xN6co2aQR0DB0MPKD6IxH62wSOJKBzF4o5xzzy/fl0ysDZbZ8Z/5Rejvp3yNT
+    68B0X5CM27hVdYE+/tcKGl9WKmewIf3fTZUfBcwFIObMIl9fkK/519nwFed4AfOX
+    /a2LCBECgYEA9Lyl/eyzXuU2dgs6Gw/WMobqOVnHF9wbukS1XSKdwMogtpt7Pb23
+    +32r9xHgeRDvvWwSp8lNPZ8mu77dQ6akbOuOk5C6V3Mqt4zam3DBDMtL63nKq8tq
+    LQ0PVjj8cAgu3GSDCz7htqUb44rn5tX9zlM0vrwxzyYqbet7ZbsGoYsCgYEAxORQ
+    JFn1vwag8VBw3bngx3SJ46CpCC8Gz830W7pEaTS6zTTiDC4p5sATGya91JS8l47G
+    ikP2bcWzvT6aP/u+TZSqZiqp5Kn37fx8Et+ltIl47SH+PJHIR9F9r2f9zqla6mlP
+    zcX/mTSuAJCTP4whQA3/f1wNAeBnewhK9fXCOokCgYAz6TPYSXW+giXsIfOAO/q2
+    GvHhmk5lnDVxbBOAHtCRTQ5lTVN1xCEbQgvQy0TuyQ3hAuRuHH+6u2BO4Gw0Zkto
+    IwrJ+B/eXrpH1qOj5uW73f9Lgjjf+bSau7NuGYZKCSJPcy5smzjrMdhZimQoDWnZ
+    csK0VlzGUJUdXZ599I6ygwKBgGTf+LN3J7H0Snb4WKsw9Zoa+h6WjKO1vE6xXVW1
+    rCEes+o5Autsp2ki1WcexTlp7unTa6MhSNta5Ei8Dzli2FBVL6xihWKzNmRG7Kaa
+    0QIbQMp1lRUhN7Sb/0HkDKRaHktlI07w95Bd7hw59kcjm1F/Gnz9A2kHuNzPFeDI
+    RffJAoGAdeCID5sb0oHEHTIxxB+cgfaiyaAe9qrW2INNWLVn5OTDh6cidatnWAor
+    M/SxwNoiYcCpi869q7wzjw5gNOVoNJbmwzDA7s+lgjTPQpq2jmO6RtweKbYoN5Zw
+    ++LiD3r07TD3p2QAyeooT29D/d6/2Hd6oyTJcZWIQTN+MTcXQO4=
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8OrO388da9JImsG28PdRanbUIOsBqOA495etWbtVT6wMOnDRioO9WFh9P83qFr5JY4rTQrJO4wxsHz8ji2+w/2+8JfLu/YAkgP9k7bP+3wp9MMQkLvn/prRQGDqyXeLv5xQ4dt4r6WRx92dIjUfwnN2GThePu3ur4wgpOzmf4CVJyDLM1KIPacb9TkuZpD6uNnVWwX0nRhN2zTIsMXeHO1WgNOzuPmhuXtdpvdmXBqWAfkZwh3oEjT2ORRuml0/MenNXahp3twWlVhO32bf8wxPRqbM+7CX+790jritG5Y97rR5Op5UxAPTM/qj0Xv26lvNBK23zaU8lx8XgJe/Fj
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.11.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.11.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.11.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.11.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAshiE2vK11KH1/PHO9v5IcT1ol3kuAorv6IgW+1paT9w4pFnd
+    H2DHQxTJsZ629cig+ELVAKHQnkND2U++/DM20ai5ZfpOwlvd+dL95/FbGb62Ozxx
+    kxBjyc/Bbbs8LcZtS1SN+agdkjQG1StpckUbFppoJ9nzWgnEcdYdonQ6aThgd+YL
+    rAOX04s3cMlCflClQl3lGFo24Qdhk/Y4M5rodfqfD5NOSKEhYP/dTMunri8zB5bU
+    ifvOvCWUKUOxLjkx95raY82xMHUobMYk87RcLPcq8pyz96/FPoiLqxM1oznTKNiI
+    0bW0xjf7FFjfLCjTapKZPRz8+Wkvzmzj35LLrwIDAQABAoIBADJoGCo0Kdy93nay
+    JgboX66VV+YPaUNU+aQR6JdJsmgKB4oU2S4JYTyORKveJSCZoV3C5LCiG/6/QRPf
+    q0mMYUaj/51qZCJEiCYuXqjoOgWmYcOQTwD10ZiMEc4yAU1fbQ22J9zyhTQdP5XU
+    DKtH/eu+1h35ZRQl0ZD6rjaNuP6QekJM6IVCC7XBaCG5+wSER9R25HbbDhdb7CwH
+    W1GP9IgISqy9Z3f4PQOyCUmn/O99lN8kry6ui7bCywRfITV6C+pn02DpMgzKZ8jn
+    3yts1f2mIbYVxnahtCaI3QTag6wBsnFq+U0uIXrUGMeeRzg9N1Ur01qdJpIR9g0v
+    Nt7QUZkCgYEA4lEavsFitSll/33JY4q82nYpjXAxTON3xraUqNYn5Cde06gNi8n1
+    t9TCLUqDhSpvgEOyJE/uwo5LAj79Ce2EwLkCttNggqRXBlY5ZpljwfWmxZtuGm/z
+    BJaOtkaK/1diR/+Qn/fTMyPH5JIXuQ6/XF60W4MSbzPgY4GO1BDx+G0CgYEAyXRT
+    00GDdiXbxQmzeHTO9Bg5y36Y1FEWDLnc89bpHPTR4sT/XCczerevy/l8jsdzZlnu
+    5ZddfWMF7EGNo51Zbmi0oLQ7nzigoVFcnhFHRFoCP36T9mvJk7O8Ao3ttpl/J2r0
+    mFiaKi0lhmZVbNpmliKjWAMZJyt6I7AfYekcOwsCgYEA0W8MuQptNgkhgtX80ElL
+    iz9eJK12chjuds3vtG66a8CjWGtkXcB/y6bwKsmR/GHQ3XnIGSJ/aTwU3fc8YzuS
+    ZmbPxDDIVx2OCycv52p7jrqtoqC7u2tuEQji+Hs/lhxfrxEp3V+R6vlpunQX0AF9
+    xRU/ApDBNndjZ7I1YrprseECgYA+zx8HgaiMIJeZokGrb7fKkPcMBCeAItveEeDa
+    wYmito3txv/a6nn5a+XKkbmNBpBrO+To/j1ux33kQDf56Cgm7UxLwoXISa6DPUvE
+    GJ0AqZOD2mIldUu+2k3m+ftAcDEdyBIEobNHLRZDBgriSmGrs5b77NNdzAdjsxjF
+    vRlJKwKBgD8DcP/C9pABC2mRQyH//RTk6XZfiDY0L18lwH7acEdHlJiF1PTwvIHD
+    cj1nMyG2MxEiSt1E5O/YQ4Lo3sognFIb8keu7IYxEgLXhvWFR3RwaYCjrF4ZGfD2
+    +83eUFPZQvEwTY/8OCogzJQfs1CT8+pLdO9tZQbrAaxfmF6c48KN
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
+  cluster_domain: cookied-bm-dpdk-pipeline.local
+  cluster_name: cookied-bm-dpdk-pipeline
+  compute_bond_mode: balance-slb
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.11.0/24
+  control_vlan: '2416'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.62
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.0/26
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: obutenko@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.11.241
+  infra_kvm01_deploy_address: 172.16.49.11
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.11.242
+  infra_kvm02_deploy_address: 172.16.49.12
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.11.243
+  infra_kvm03_deploy_address: 172.16.49.13
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.11.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.15
+  maas_hostname: cfg01
+  mcp_version: testing
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: deploy-name.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openstack_benchmark_node01_address: 10.167.11.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '3'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.11
+  openstack_compute_rack01_tenant_subnet: 10.167.12
+  openstack_control_address: 10.167.11.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.11.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.11.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.11.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.11.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.11.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.11.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.11.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.11.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.12.6
+  openstack_gateway_node02_address: 10.167.11.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.12.7
+  openstack_gateway_node03_address: 10.167.11.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.12.8
+  openstack_message_queue_address: 10.167.11.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.11.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.11.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.11.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'True'
+  openstack_nfv_sriov_enabled: 'True'
+  openstack_nova_compute_hugepages_count: '16'
+  openstack_nova_compute_nfv_req_enabled: 'True'
+  openstack_nfv_sriov_network: physnet1
+  openstack_nfv_sriov_numvfs: '7'
+  openstack_nfv_sriov_pf_nic: enp5s0f1
+  openstack_nova_cpu_pinning: 6,7,8,9,10,11
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.11.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.11.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.11.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.11.19
+  openstack_version: pike
+  cinder_version: ${_param:openstack_version}
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
+  salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
+  salt_master_address: 10.167.11.2
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.2
+  shared_reclass_branch: master
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.12.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.12.0/24
+  tenant_vlan: '2417'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
new file mode 100644
index 0000000..ff8340b
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
@@ -0,0 +1,111 @@
+nodes:
+    cfg01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    # Physical nodes
+    kvm01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp3s0f0:
+          role: single_mgm
+        enp3s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp3s0f0:
+          role: single_mgm
+        enp3s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp3s0f0:
+          role: single_mgm
+        enp3s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_compute_node01
+      roles:
+      - openstack_compute_dpdk
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      - openstack_compute_sriov
+      interfaces:
+        enp5s0f0:
+          role: combined_vlan_ctl_mgm
+          single_address: 10.167.11.105
+        enp3s0f0:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.0"
+          tenant_address: 10.167.12.105
+        enp3s0f1:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.1"
+        enp5s0f1:
+          role: sriov
+
+    cmp02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_compute_node02
+      roles:
+      - openstack_compute_dpdk
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      - openstack_compute_sriov
+      interfaces:
+        enp5s0f0:
+          role: combined_vlan_ctl_mgm
+          single_address: 10.167.11.106
+        enp3s0f0:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.0"
+          tenant_address: 10.167.12.106
+        enp3s0f1:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.1"
+        enp5s0f1:
+          role: sriov
+
+    gtw01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        enp3s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.5
+        enp3s0f1:
+          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+    gtw02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_gateway_node02
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        enp3s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.4
+        enp3s0f1:
+          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml
new file mode 100644
index 0000000..cec7902
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml
@@ -0,0 +1,175 @@
+nodes:
+    ctl01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    ctl02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    ctl03.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    dbs03.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    msg03.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    prx01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    prx02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+#    mtr01.cookied-bm-dpdk-pipeline.local:
+#      reclass_storage_name: stacklight_telemetry_node01
+#      roles:
+#      - stacklight_telemetry
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
+
+#    mtr02.cookied-bm-dpdk-pipeline.local:
+#      reclass_storage_name: stacklight_telemetry_node02
+#      roles:
+#      - stacklight_telemetry
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
+
+#    mtr03.cookied-bm-dpdk-pipeline.local:
+#      reclass_storage_name: stacklight_telemetry_node03
+#      roles:
+#      - stacklight_telemetry
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
+
+    cid01.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid02.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid03.cookied-bm-dpdk-pipeline.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
new file mode 100644
index 0000000..6a702b8
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -0,0 +1,183 @@
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: WR for mounting 1G hugepages before linux.state
+  cmd: |
+    salt 'cmp*' state.sls linux.system.hugepages;
+    salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
+    salt 'cmp*' cmd.run "echo 16 | sudo  tee  /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: WR for correct access to git repo from jenkins on cfg01 node
+  cmd: |
+    git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
+    git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
+    chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+    chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Add cmp nodes to /etc/hosts
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.105 cmp01.cookied-bm-dpdk-pipeline.local cmp01' >> /etc/hosts";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.106 cmp02.cookied-bm-dpdk-pipeline.local cmp02' >> /etc/hosts";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Temporary WR
+  cmd: |
+    ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Enable Jenkins
+  cmd: |
+    systemctl enable jenkins || true;
+    systemctl restart jenkins || true;
+    sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: run jenkins.client
+  cmd: |
+    salt-call state.sls jenkins.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Give each node root access with key from cfg01"
+  cmd: |
+    set -e;
+    set -x;
+    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+    salt '*' cmd.run "service sshd restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: temp WR
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ifdown br-prv; ifup br-prv'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.128/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.130,end=172.17.42.180 --gateway 172.17.42.129'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 192.168.1.0/24 --name net04__subnet --allocation-pool start=192.168.1.150,end=192.168.1.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..3f4f128
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
@@ -0,0 +1,79 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..07a6936
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,100 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   # Install latest kernel
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
new file mode 100644
index 0000000..9168b7f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
new file mode 100644
index 0000000..612299f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
@@ -0,0 +1,494 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-dpdk-pipeline') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.11.253') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
+{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.49.3') %}
+{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.49.31') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-dpdk-pipeline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +62
+            l2_network_device: +61
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
+            default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
+            default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+            default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+          ip_ranges:
+              dhcp: [+2, -4]
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
+        params:
+          ip_reserved:
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.12.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
+
+
+    groups:
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+          private:
+            parent_iface:
+              phys_dev: !os_env CONTROL_IFACE
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL used by fuel-devops
+                                            # to access the Ironic API
+            # Agent URLs must be accessible from the deploying node when nodes
+            # are bootstrapped with PXE; usually a PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
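+            # Example values (assumed placeholders, supplied via environment
+            # variables; 6385 is the standard Ironic API port):
+            #   IRONIC_URL=http://<ironic-api-host>:6385/
+            #   IRONIC_AGENT_KERNEL_URL=http://<http-server>/tinyipa.vmlinuz
+            #   IRONIC_AGENT_RAMDISK_URL=http://<http-server>/tinyipa.gz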
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
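+              # These IPMI settings map onto an ipmitool invocation roughly
+              # like the following sketch (not issued verbatim by the driver):
+              #   ipmitool -I lanplus -H "$IPMI_HOST_KVM01" -p 623 \
+              #            -U "$IPMI_USER" -P "$IPMI_PASSWORD" power status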
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces:
+                - label: enp3s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp3s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp3s0f1
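+              # A sketch of the intended rendering (the exact interfaces(5)
+              # output depends on the salt model): an active-backup bond with
+              # enp3s0f1 as its only slave, roughly
+              #   auto bond0
+              #   iface bond0 inet manual
+              #       bond-mode active-backup
+              #       bond-slaves enp3s0f1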
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces:
+                - label: enp3s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp3s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp3s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces:
+                - label: enp3s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                enp3s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp3s0f1
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp5s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+              interfaces:
+                - label: enp3s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
+                - label: enp5s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP01
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP01
+              network_config:
+                enp5s0f0:
+                  networks:
+                   - admin
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp5s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+              interfaces:
+                - label: enp3s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
+                - label: enp5s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP02
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP02
+              network_config:
+                enp5s0f0:
+                  networks:
+                   - admin
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+              interfaces:
+                - label: enp3s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
+
+              network_config:
+                enp3s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp3s0f1
+
+          - name: {{ HOSTNAME_GTW02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_GTW02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs above, this is a URL to the image used
+                  # to deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+              interfaces:
+                - label: enp3s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
+                - label: enp3s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
+
+              network_config:
+                enp3s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp3s0f1
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
similarity index 94%
rename from tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
rename to tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
index 06df7ad..6dc4829 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
@@ -1,5 +1,20 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
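+# Note: in these steps 'retry: {count, delay}' re-runs the command up to
+# 'count' times with 'delay' seconds between attempts, while
+# 'skip_fail: true' lets the scenario continue even if the step fails
+# (keepalived is applied again cluster-wide in the step above).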
+
 - description: Install glusterfs
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@glusterfs:server' state.sls glusterfs.server.service
@@ -21,21 +36,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
 - description: Install RabbitMQ on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
@@ -114,10 +114,10 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Check the OpenStack control VIP
+- description: Check the VIP
   cmd: |
-    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 9c24f6d..44badb5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -1,11 +1,10 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{# from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context #}
+{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Install OpenStack control services
 
@@ -97,7 +96,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@cinder:volume' state.sls cinder
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Check cinder list
@@ -180,23 +179,6 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Create net04_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
     '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -206,7 +188,7 @@
 
 - description: Create subnet_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+    '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.10,end=172.17.42.60 --gateway 172.17.42.1'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
@@ -220,7 +202,7 @@
 
 - description: Create subnet_net04
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+    '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet --allocation-pool start=192.168.0.120,end=192.168.0.240'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
@@ -246,54 +228,4 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-#- description:  Allow all tcp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description:  Allow all icmp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
-#- description: sync time
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-#    'service ntp stop; ntpd -gq;  service ntp start'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
-- description: Temp workaround of  PROD-13167
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
-    'apt-get install python-pymysql -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install docker.io on gtw
-  cmd: salt 'gtw01*' cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 392798b..f1416d5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,7 +1,17 @@
 default_context:
-  mcp_version: testing
+  mcp_version: proposed
   ceph_enabled: 'False'
-  cicd_enabled: 'False'
+  cicd_enabled: 'True'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  kqueen_custom_mail_enabled: 'False'
+  kqueen_enabled: 'False'
   cluster_domain: cookied-bm-mcp-dvr-vxlan.local
   cluster_name: cookied-bm-mcp-dvr-vxlan
   compute_bond_mode: active-backup
@@ -10,13 +20,13 @@
   context_seed: Psupdi5ne1kCk31iDWV7fhbHnBALIr3SWhce7Z01jCaMwlAhGKxeLPFPQ9CgYzJD
   control_network_netmask: 255.255.255.0
   control_network_subnet: 10.167.4.0/24
-  control_vlan: '2416'
+  control_vlan: '2403'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
   cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
-  deploy_network_gateway: 172.16.49.1
+  deploy_network_gateway: 172.16.164.1
   deploy_network_netmask: 255.255.255.192
-  deploy_network_subnet: 172.16.49.0/26
+  deploy_network_subnet: 172.16.164.0/26
   deployment_type: physical
   dns_server01: 172.18.176.6
   dns_server02: 172.18.208.44
@@ -26,37 +36,40 @@
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.4.241
-  infra_kvm01_deploy_address: 172.16.49.11
+  infra_kvm01_deploy_address: 172.16.164.11
   infra_kvm01_hostname: kvm01
   infra_kvm02_control_address: 10.167.4.242
-  infra_kvm02_deploy_address: 172.16.49.12
+  infra_kvm02_deploy_address: 172.16.164.12
   infra_kvm02_hostname: kvm02
   infra_kvm03_control_address: 10.167.4.243
-  infra_kvm03_deploy_address: 172.16.49.13
+  infra_kvm03_deploy_address: 172.16.164.13
   infra_kvm03_hostname: kvm03
   infra_kvm_vip_address: 10.167.4.240
   infra_primary_first_nic: eth1
   infra_primary_second_nic: eth2
   kubernetes_enabled: 'False'
   local_repositories: 'False'
-  maas_deploy_address: 172.16.49.15
+  maas_deploy_address: 172.16.164.14
   maas_hostname: cfg01
   offline_deployment: 'False'
   opencontrail_enabled: 'False'
-  #openstack_benchmark_node01_address: 10.167.4.95
-  #openstack_benchmark_node01_hostname: bmk01
+  openldap_enabled: 'False'
+  bmk_enabled: 'False'
+  static_ips_on_deploy_network_enabled: 'False'
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 10.167.4
   openstack_compute_rack01_tenant_subnet: 10.167.6
-  openstack_compute_node01_hostname: cmp001
-  openstack_compute_node02_hostname: cmp002
+  openstack_compute_node01_hostname: cmp01
+  openstack_compute_node02_hostname: cmp02
   openstack_compute_node01_address: 10.167.4.3
   openstack_compute_node02_address: 10.167.4.31
   openstack_compute_node01_single_address: 10.167.4.3
   openstack_compute_node02_single_address: 10.167.4.31
-  openstack_compute_node01_deploy_address: 172.16.49.3
-  openstack_compute_node02_deploy_address: 172.16.49.31
+  openstack_compute_node01_deploy_address: 172.16.164.3
+  openstack_compute_node02_deploy_address: 172.16.164.31
   openstack_control_address: 10.167.4.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.11
@@ -76,10 +89,10 @@
   openstack_enabled: 'True'
   openstack_gateway_node01_address: 10.167.4.224
   openstack_gateway_node01_hostname: gtw01
-  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node01_tenant_address: 10.167.6.4
   openstack_gateway_node02_address: 10.167.4.225
   openstack_gateway_node02_hostname: gtw02
-  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node02_tenant_address: 10.167.6.5
   openstack_message_queue_address: 10.167.4.40
   openstack_message_queue_hostname: msg
   openstack_message_queue_node01_address: 10.167.4.41
@@ -140,39 +153,43 @@
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_master_address: 10.167.4.2
   salt_master_hostname: cfg01
-  salt_master_management_address: 172.16.49.2
+  salt_master_management_address: 172.16.164.2
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
-  stacklight_log_address: 10.167.8.60
+  stacklight_enabled: 'False'
+  fluentd_enabled: 'False'
+  stacklight_log_address: 10.167.4.60
   stacklight_log_hostname: log
-  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_address: 10.167.4.61
   stacklight_log_node01_hostname: log01
-  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_address: 10.167.4.62
   stacklight_log_node02_hostname: log02
-  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_address: 10.167.4.63
   stacklight_log_node03_hostname: log03
-  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_address: 10.167.4.70
   stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_address: 10.167.4.71
   stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_address: 10.167.4.72
   stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_address: 10.167.4.73
   stacklight_monitor_node03_hostname: mon03
   stacklight_notification_address: alerts@localhost
   stacklight_notification_smtp_host: 127.0.0.1
-  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_address: 10.167.4.85
   stacklight_telemetry_hostname: mtr
-  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_address: 10.167.4.86
   stacklight_telemetry_node01_hostname: mtr01
-  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_address: 10.167.4.87
   stacklight_telemetry_node02_hostname: mtr02
-  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_address: 10.167.4.88
   stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
-  fluentd_enabled: 'True'
   tenant_network_gateway: 10.167.6.1
   tenant_network_netmask: 255.255.255.0
   tenant_network_subnet: 10.167.6.0/24
-  tenant_vlan: '2417'
+  stacklight_long_term_storage_type: prometheus
+  prometheus_relay_bind_port: 9094
+  tenant_vlan: '2406'
   upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index a1bd850..a70b9f3 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -7,6 +7,8 @@
       interfaces:
         ens3:
           role: single_dhcp
+        ens4:
+          role: single_static_ctl
 
     # Physical nodes
     kvm01.cookied-bm-mcp-dvr-vxlan.local:
@@ -42,7 +44,7 @@
         enp9s0f1:
           role: bond0_ab_ovs_vlan_ctl
 
-    cmp001.cookied-bm-mcp-dvr-vxlan.local:
+    cmp01.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node01
       roles:
       - openstack_compute
@@ -50,14 +52,14 @@
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
-          role: single_dhcp
+          role: single_mgm_dhcp
         enp9s0f1:
-          role: bond0_ab_ovs_vxlan_ctl_mesh
-          single_address: ${_param:openstack_compute_node01_control_address}
-          tenant_address: ${_param:openstack_compute_node01_tenant_address}
+          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+          single_address: 10.167.4.105
+          tenant_address: 10.167.6.105
 
 
-    cmp002.cookied-bm-mcp-dvr-vxlan.local:
+    cmp02.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node02
       roles:
       - openstack_compute
@@ -65,12 +67,11 @@
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
-          role: single_dhcp
+          role: single_mgm_dhcp
         enp9s0f1:
-          role: bond0_ab_ovs_vxlan_ctl_mesh
-          single_address: ${_param:openstack_compute_node02_control_address}
-          tenant_address: ${_param:openstack_compute_node02_tenant_address}
-
+          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+          single_address: 10.167.4.106
+          tenant_address: 10.167.6.106
 
     gtw01.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_gateway_node01
@@ -79,7 +80,7 @@
       - linux_system_codename_xenial
       interfaces:
         enp2s0f0:
-          role: single_dhcp
+          role: single_mgm_dhcp
         enp2s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
 
@@ -90,6 +91,6 @@
       - linux_system_codename_xenial
       interfaces:
         enp2s0f0:
-          role: single_dhcp
+          role: single_mgm_dhcp
         enp2s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index 7435fc8..37d0b14 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -14,7 +14,6 @@
       reclass_storage_name: openstack_control_node02
       roles:
       - openstack_control
-      - features_designate
       - linux_system_codename_xenial
       interfaces:
         ens2:
@@ -37,9 +36,10 @@
       reclass_storage_name: openstack_database_node01
       roles:
       - openstack_database_leader
-      - features_designate_database
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -49,6 +49,8 @@
       - openstack_database
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -56,9 +58,10 @@
       reclass_storage_name: openstack_database_node03
       roles:
       - openstack_database
-      - features_designate_database
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -68,6 +71,8 @@
       - openstack_message_queue
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -77,6 +82,8 @@
       - openstack_message_queue
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -86,6 +93,8 @@
       - openstack_message_queue
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -93,9 +102,10 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
+        ens2:
+          role: single_dhcp
         ens3:
           role: single_ctl
 
@@ -103,35 +113,63 @@
       reclass_storage_name: openstack_proxy_node02
       roles:
       - openstack_proxy
-      - features_designate_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens2:
+          role: single_dhcp
+        ens3:
+          role: single_ctl
+
+    cid01.cookied-bm-mcp-dvr-vxlan.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_ctl
 
-    mtr01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: stacklight_telemetry_node01
+    cid02.cookied-bm-mcp-dvr-vxlan.local:
+      reclass_storage_name: cicd_control_node02
       roles:
-      - stacklight_telemetry
+      - cicd_control_manager
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_ctl
 
-    mtr02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: stacklight_telemetry_node02
+    cid03.cookied-bm-mcp-dvr-vxlan.local:
+      reclass_storage_name: cicd_control_node03
       roles:
-      - stacklight_telemetry
+      - cicd_control_manager
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_ctl
 
-    mtr03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: stacklight_telemetry_node03
-      roles:
-      - stacklight_telemetry
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
+#    mon01.cookied-bm-mcp-dvr-vxlan.local:
+#      reclass_storage_name: stacklight_server_node01
+#      roles:
+#      - stacklightv2_server_leader
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
+#
+#    mon02.cookied-bm-mcp-dvr-vxlan.local:
+#      reclass_storage_name: stacklight_server_node02
+#      roles:
+#      - stacklightv2_server
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#         role: single_ctl
+#
+#    mon03.cookied-bm-mcp-dvr-vxlan.local:
+#      reclass_storage_name: stacklight_server_node03
+#      roles:
+#      - stacklightv2_server
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 9e176ab..89895ea 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -6,51 +6,14 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
 
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
-
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
 
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-- description: Temporary WR for cinder backend defined by default in reclass.system
-  cmd: |
-    sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for rack01 compute generator"
-  cmd: |
-    set -e;
-    # Remove rack01 key
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-
-    # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
-
-    # Set ipaddresses for our nodes
-    reclass-tools add-key parameters._param.openstack_compute_node01_control_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.3 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_control_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.31 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address {{ SHARED.IPV4_NET_TENANT_PREFIX }}.3 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address {{ SHARED.IPV4_NET_TENANT_PREFIX }}.31 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "runtest" "auditd" ') }}
 
@@ -58,23 +21,21 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-- description: "Lab03 workaround: Control network access from cfg01 node using sshuttle via kvm01"
+- description: "WR for changing image to proposed"
   cmd: |
     set -e;
-    set -x;
-    KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
-    apt-get install -y sshuttle;
-    sshuttle -r ${KVM01_DEPLOY_ADDRESS} {{ SHARED.IPV4_NET_CONTROL }} -D >/dev/null;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+    . /root/venv-reclass-tools/bin/activate;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/init.yml;
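+    # 'reclass-tools add-key <dotted.path> <value> <file>' (as used above)
+    # injects the parameter into the reclass model so that the later
+    # 'salt.control' state spawns control-plane VMs from the proposed image.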
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 1, delay: 10}
   skip_fail: false
 
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
 - description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
     sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
@@ -83,16 +44,35 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
 ########################################
 # Spin up Control Plane VMs on KVM nodes
 ########################################
 
+- description: Hack resolv.conf on nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 - description: Execute 'libvirt' states to create necessary libvirt networks
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
+- description: Temporary WR to set correct bridge names according to environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+    salt '*' saltutil.refresh_pillar;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Create VMs for control plane
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
   node_name: {{ HOSTNAME_CFG01 }}
@@ -131,6 +111,15 @@
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
+- description: Add cmp nodes to /etc/hosts
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.4.105 cmp01.cookied-bm-mcp-dvr-vxlan.local cmp01' >> /etc/hosts";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.4.106 cmp02.cookied-bm-mcp-dvr-vxlan.local cmp02' >> /etc/hosts";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index 12016f5..3f4f128 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,8 @@
    expire: False
 
   bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Block access to SSH while node is preparing
+   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -38,7 +38,6 @@
    - sudo ifdown ens3
    - sudo ip r d default || true  # remove existing default route to get it from dhcp
    - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
 
    # Create swap
    - fallocate -l 4G /swapfile
@@ -49,43 +48,21 @@
 
    ############## TCP Cloud cfg01 node ##################
    #- sleep 120
-   #   - echo "Preparing base OS"
+   - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   #   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   #   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   #   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   #   ########################################################
-   #   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   #   ########################################################
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
 
   write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
    - path: /etc/network/interfaces
      content: |
           auto ens3
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
index ca4b062..983a026 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -52,9 +52,10 @@
    #- sleep 120
    #   - echo "Preparing base OS"
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
    #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
 
-   #   # Configure Ubuntu mirrors
+   # Configure Ubuntu mirrors
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
@@ -67,13 +68,13 @@
    #   - apt-get clean
    #   - eatmydata apt-get update && apt-get -y upgrade
 
-   #   # Install common packages
+   # Install common packages
    #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
 
-   #   # Install salt-minion and stop it until it is configured
+   # Install salt-minion and stop it until it is configured
    #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
 
-   #   # Install latest kernel
+   # Install latest kernel
    #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
 
    ########################################################
@@ -96,4 +97,4 @@
           auto lo
           iface lo inet loopback
           auto {interface_name}
-          iface {interface_name} inet dhcp
\ No newline at end of file
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
index 7985929..9168b7f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -38,7 +36,6 @@
 
    # Prepare network connection
    - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}
 
    # Create swap
    - fallocate -l 4G /swapfile
@@ -46,38 +43,7 @@
    - mkswap /swapfile
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   #   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - eatmydata apt-get update && apt-get -y upgrade
-
-   #   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   #   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index ab28e9b..25c98bc 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -6,19 +6,19 @@
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.3') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.4.253') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
+{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.164.3') %}
+{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.164.31') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
 
 {% import 'cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
 {% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
@@ -39,7 +39,7 @@
 
     address_pools:
       admin-pool01:
-        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
         params:
           ip_reserved:
             gateway: +62
@@ -48,8 +48,8 @@
             default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
             default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
             default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
-            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
+            default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
             default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
             default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
           ip_ranges:
@@ -58,6 +58,7 @@
         net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.4.0/24:24') }}
         params:
           ip_reserved:
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
             gateway: +1
             l2_network_device: +1
 
@@ -69,7 +70,7 @@
             l2_network_device: +1
 
       external-pool01:
-        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
         params:
           ip_reserved:
             gateway: +1
@@ -99,6 +100,9 @@
             dhcp: false
             parent_iface:
               phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+          private:
+            parent_iface:
+              phys_dev: !os_env CONTROL_IFACE
 
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
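# NOTE: the `!os_env` tags are resolved from the shell environment when the
# underlay is created, so the new `private` parent_iface requires CONTROL_IFACE
# to be exported alongside IRONIC_LAB_PXE_IFACE_0 before the deployment runs,
# e.g. (interface names below are illustrative assumptions, not part of this change):
#   export IRONIC_LAB_PXE_IFACE_0=enp8s0f0
#   export CONTROL_IFACE=enp8s0f1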
@@ -137,16 +141,16 @@
                   l2_network_device: admin
                   interface_model: *interface_model
                   mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-                #- label: ens4
-                #  l2_network_device: private
-                #  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
               network_config:
                 ens3:
                   networks:
                     - admin
-                #ens4:
-                #  networks:
-                #    - private
+                ens4:
+                  networks:
+                    - private
 
 
       - name: default
@@ -307,13 +311,13 @@
                    - enp9s0f1
 
 
-          - name: {{ HOSTNAME_CMP001 }}
+          - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -339,16 +343,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-#                - label: enp9s0f0
-#                 mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-#               - label: enp5s0f1
-#                 mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
-#               - label: enp5s0f2
-#                 mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
-
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
               network_config:
                 enp9s0f0:
                   networks:
@@ -363,13 +360,13 @@
 
 
 
-          - name: {{ HOSTNAME_CMP002 }}
+          - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -395,15 +392,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-#               - label: eth3
-#                 mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
-#               - label: eth2
-#                 mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
-#               - label: eth4
-#                 mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
               network_config:
                 enp9s0f0:
                   networks:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/common-services.yaml
rename to tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
index a297622..01f0283 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
@@ -299,7 +299,9 @@
   skip_fail: false
 
 - description: Reboot computes
-  cmd: salt --timeout=600  "cmp*" system.reboot
+  cmd: |
+    salt "cmp*" system.reboot;
+    sleep 600;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: true
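# NOTE: replacing `salt --timeout=600 "cmp*" system.reboot` with an explicit
# reboot plus `sleep 600` keeps the salt client from failing when the rebooting
# minions drop the connection mid-call; the fixed sleep is coarse but
# predictable. A hedged polling alternative (sketch only, not part of this change):
#   until salt --timeout=10 "cmp*" test.ping; do sleep 30; done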
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
rename to tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
index bcb3ec3..7bf4d2e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
@@ -45,6 +45,7 @@
     cmp001.cookied-bm-mcp-ocata-contrail.local:
       reclass_storage_name: openstack_compute_node01
       roles:
+      - openstack_compute
       - features_lvm_backend
       - linux_system_codename_xenial
       interfaces:
@@ -61,6 +62,7 @@
     cmp002.cookied-bm-mcp-ocata-contrail.local:
       reclass_storage_name: openstack_compute_node02
       roles:
+      - openstack_compute
       - features_lvm_backend
       - linux_system_codename_xenial
       interfaces:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 742a607..fbb3d26 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -94,7 +94,7 @@
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@cinder:volume' state.sls cinder
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Check cinder list
@@ -298,7 +298,9 @@
   skip_fail: true
 
 - description: Reboot computes
-  cmd: salt --timeout=600  "cmp*" system.reboot
+  cmd: |
+    salt "cmp*" system.reboot;
+    sleep 600;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 731548c..54d1298 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -1,9 +1,8 @@
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_TRUSTY_IMAGE_URL with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_XENIAL_IMAGE_URL with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
@@ -31,8 +30,6 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
 - description: "Workaround for rack01 compute generator"
   cmd: |
     set -e;
@@ -45,38 +42,31 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: "WR for changing image to proposed"
+{%- if CUSTOM_VCP_TRUSTY_IMAGE_URL != '' %}
+
+- description: "Change trusty image to custom"
   cmd: |
-    set -e;
-    # Add message_queu host for opencontrail
+    echo "CUSTOM_TRUSTY_IMAGE is {{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}";
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ocata-contrail/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ocata-contrail/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image "{{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+{%- endif %}
 
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+{%- if CUSTOM_VCP_XENIAL_IMAGE_URL != '' %}
 
-
-- description: "Workaround for PROD-14060"
+- description: "Change xenial image to custom"
   cmd: |
-    set -e;
-    # Add tenant and single addresses for computes
-    salt-call reclass.cluster_meta_set deploy_address 172.16.49.73 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
-    salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
-    salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
-
-    salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
-    salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
-    salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
-
+    echo "CUSTOM_XENIAL_IMAGE is {{ CUSTOM_VCP_XENIAL_IMAGE_URL }}";
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image "{{ CUSTOM_VCP_XENIAL_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+{%- endif %}
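# NOTE: the two Jinja blocks above render only when CUSTOM_VCP_TRUSTY_IMAGE_URL
# or CUSTOM_VCP_XENIAL_IMAGE_URL is non-empty (both default to '' in
# underlay.yaml), so custom VCP images are strictly opt-in. Hedged example of
# enabling them before deployment (URLs are placeholders):
#   export CUSTOM_VCP_XENIAL_IMAGE_URL=http://images.example/xenial.qcow2
#   export CUSTOM_VCP_TRUSTY_IMAGE_URL=http://images.example/trusty.qcow2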
 
 - description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
@@ -93,7 +83,33 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
-  
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Update minion information
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun openssh after env model is generated
+  cmd: |
+    salt-call state.sls openssh
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 ########################################
@@ -126,7 +142,7 @@
 
 - description: Hack resolv.conf on VCP nodes for internal services access
   cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -154,3 +170,25 @@
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Lab04 workaround: Give each node root acces with key from cfg01"
+  cmd: |
+    set -e;
+    set -x;
+    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+    salt '*' cmd.run "service sshd restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
+  cmd: |
+    set -e;
+    set -x;
+    KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
+    apt-get install -y sshuttle;
+    sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
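# NOTE: `sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D` tunnels the
# whole control subnet through kvm01 over SSH and daemonizes (-D), which is
# what gives cfg01 access to VCP nodes that have no direct route to it.
# Stopping the tunnel later would be manual, e.g. (assumption, sketch only):
#   pkill -f sshuttle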
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 4045fe8..925c795 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -73,7 +73,7 @@
       salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
     fi
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 2, delay: 30}
   skip_fail: false
 
 - description: Configure Alerta if it exists
@@ -112,6 +112,15 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Install elasticsearch server
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
@@ -253,5 +262,5 @@
 - description: Run salt minion to create cert files
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 3, delay: 15}
   skip_fail: false
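# NOTE: the retry bumps above (mongodb: 1x10s to 2x30s, the final salt.minion
# cert run: 1x10s to 3x15s) use the template's standard step schema, where
# `retry: {count: N, delay: S}` re-runs `cmd` up to N times with S seconds
# between attempts, and `skip_fail: true` lets the pipeline continue on error.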
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
index f50f0b6..d520d62 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -1,5 +1,7 @@
 # Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set CUSTOM_VCP_TRUSTY_IMAGE_URL = os_env('CUSTOM_VCP_TRUSTY_IMAGE_URL', '') %}
+{% set CUSTOM_VCP_XENIAL_IMAGE_URL = os_env('CUSTOM_VCP_XENIAL_IMAGE_URL', '') %}
 
 #{# set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' #}
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ocata-contrail') %}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/common-services.yaml
rename to tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index b8c6bd8..b0f75c4 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -4,6 +4,8 @@
 {% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
 
 # Install OpenStack control services
 
@@ -226,4 +228,13 @@
     '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
-  skip_fail: false
\ No newline at end of file
+  skip_fail: false
+
+- description: Temporary workaround to re-initialize br-prv on compute nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ifdown br-prv; ifup br-prv'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
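# NOTE: the INSTALL_DOCKER_ON_GTW() call resolves only because of the
# `{% import 'shared-salt.yaml' as SHARED with context %}` line added at the
# top of this file; without that import the template would fail to render.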
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 64713fe..802c2ee 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -7,6 +7,8 @@
       interfaces:
         ens3:
           role: single_dhcp
+        ens4:
+          role: single_static_ctl
 
     # Physical nodes
     kvm01.cookied-bm-mcp-ovs-dpdk.local:
@@ -60,6 +62,8 @@
         enp3s0f1:
           role: bond_dpdk_prv_lacp
           dpdk_pci: "0000:03:00.1"
+        enp5s0f1:
+          role: sriov
 
     cmp02.cookied-bm-mcp-ovs-dpdk.local:
       reclass_storage_name: openstack_compute_node02
@@ -79,6 +83,8 @@
         enp3s0f1:
           role: bond_dpdk_prv_lacp
           dpdk_pci: "0000:03:00.1"
+        enp5s0f1:
+          role: sriov
 
     gtw01.cookied-bm-mcp-ovs-dpdk.local:
       reclass_storage_name: openstack_gateway_node01
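# NOTE: the `role: sriov` entries added on enp5s0f1 move SR-IOV enablement
# into the reclass model; they replace the manual "Enable sriov interfaces"
# step removed from salt.yaml below, which wrote the VF count directly:
#   echo 7 > /sys/class/net/enp5s0f1/device/sriov_numvfs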
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index 6293886..cfe1145 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -14,25 +14,12 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-- description: "WR for changing image to proposed"
-  cmd: |
-    set -e;
-    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
-    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
-    . /root/venv-reclass-tools/bin/activate;
-    pip install git+https://github.com/dis-xcom/reclass-tools;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ovs-dpdk/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ovs-dpdk/infra/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
   node_name: {{ HOSTNAME_CFG01 }}
@@ -56,21 +43,13 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: "Workaround for PROD-18834: Pre-install linux-headers package"
-  cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+- description: WR for correct access to git repo from Jenkins on cfg01 node
   cmd: |
-    set -ex;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
-    salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
-    salt 'cmp*' cmd.run "service openvswitch-switch stop";
-    salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
-    salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
-    salt 'cmp*' cmd.run "service openvswitch-switch start";
+    git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
+    git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
+    chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+    chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
+
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
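# NOTE: the mirrored repositories above feed the local git server used by
# Jenkins on cfg01; `git clone --mirror` creates bare repositories, so a
# hedged sanity check after this step could be:
#   git ls-remote /home/repo/mk/mk-pipelines/ | head -n 1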
@@ -133,8 +112,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
 - description: Add cmp nodes to /etc/hosts
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.105 cmp01.cookied-bm-mcp-ovs-dpdk.local cmp01' >> /etc/hosts";
@@ -143,23 +120,16 @@
   retry: {count: 1, delay: 10}
   skip_fail: true
 
-- description: Enable sriov interfaces
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'cmp*' cmd.run "echo 7 > /sys/class/net/enp5s0f1/device/sriov_numvfs"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Temporary WR
-  cmd: |
-    ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub;
-    pub_key=`cat /root/.ssh/id_rsa.pub`;
-    salt '*' cmd.run "echo $pub_key >> /root/.ssh/authorized_keys";
-    salt '*' cmd.run "service sshd restart";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Give each node root access with key from cfg01"
+  cmd: |
+    set -e;
+    set -x;
+    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+    salt '*' cmd.run "service sshd restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
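# NOTE: `ssh-keygen -y -f /root/.ssh/id_rsa` re-derives the public key from
# the master's private key, so the step works even if id_rsa.pub is absent;
# appending it to authorized_keys on every minion and restarting sshd gives
# cfg01 root SSH access to the whole cluster.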
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
index ae10126..23eb24c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
@@ -11,6 +11,7 @@
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
 {% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.11.253') %}
 {% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
 {% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
 {% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
@@ -57,6 +58,7 @@
         net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
         params:
           ip_reserved:
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
             gateway: +1
             l2_network_device: +1
 
@@ -98,6 +100,9 @@
             dhcp: false
             parent_iface:
               phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+          private:
+            parent_iface:
+              phys_dev: !os_env CONTROL_IFACE
 
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
@@ -136,16 +141,16 @@
                   l2_network_device: admin
                   interface_model: *interface_model
                   mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
-                #- label: ens4
-                #  l2_network_device: private
-                #  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
               network_config:
                 ens3:
                   networks:
                     - admin
-                #ens4:
-                #  networks:
-                #    - private
+                ens4:
+                  networks:
+                    - private
 
 
       - name: default
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
new file mode 100644
index 0000000..99b3aa7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
@@ -0,0 +1,76 @@
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
+# Install support services
+- description: Create and distribute SSL certificates for services using salt state
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Install docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@docker:host' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check docker
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@docker:host' cmd.run 'docker ps'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keepalived on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
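# NOTE: every step in this file targets minions through Salt compound
# matchers: `-C 'I@docker:host'` selects nodes whose pillar defines
# `docker:host`, and `-C 'I@keepalived:cluster and *01*'` intersects the
# pillar match with a glob on the minion id. A hedged ad-hoc check:
#   salt -C 'I@haproxy:proxy' test.ping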
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
new file mode 100644
index 0000000..45ad04f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
@@ -0,0 +1,264 @@
+{% from 'k8s-ha-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Install etcd
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@etcd:server' state.sls etcd.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check etcd cluster health
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@etcd:server' cmd.run '. /var/lib/etcd/configenv && etcdctl cluster-health'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Kubernetes Addons
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Install Kubernetes components
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool' state.sls kubernetes.pool
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 60}
+  skip_fail: false
+
+# Opencontrail Control Plane
+
+- description: Create configuration files for OpenContrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install OpenContrail database on the first database node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 20}
+  skip_fail: false
+
+- description: Configure OpenContrail as an add-on for Kubernetes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Verify the status of the OpenContrail service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Set up the OpenContrail resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# OpenContrail vrouters
+- description: Refresh pillars on cmp*
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'cmp*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Apply highstate on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.highstate exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# - description: Reboot contrail computes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' cmd.run 'reboot'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+- description: Reboot contrail computes
+  cmd: salt --timeout=600 -C 'I@opencontrail:compute' system.reboot
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Apply opencontrail.client on contrail computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run Kubernetes master without setup
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: true
+
+- description: Run Kubernetes master setup
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
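# NOTE: `--subset 1` above runs kubernetes.master.setup on a single randomly
# selected master, avoiding the three masters racing through the setup state;
# the preceding step (with exclude=kubernetes.master.setup) has already
# prepared all masters.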
+
+- description: Restart Kubelet
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool' service.restart 'kubelet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check node registrations
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@kubernetes:pool' cmd.run 'sleep 60; kubectl get nodes'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Renew hosts file on the whole cluster
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# - description: Install Opencontrail db on all nodes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:database' state.sls opencontrail.database
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 2, delay: 20}
+#   skip_fail: false
+
+# - description: Install Opencontrail control on ctl01
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail control on all nodes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail on collector
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# # OpenContrail vrouters
+# - description: Install Opencontrail client
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail on computes
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 2, delay: 5}
+#   skip_fail: false
+
+# - description: Wake up vhost0
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-;
+#     nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail client on computes
+#   cmd: sleep 300 && salt --timeout=60 --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Opencontrail on computes #2
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:compute' state.sls opencontrail
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 2, delay: 5}
+#   skip_fail: false
+
+# # Kubernetes
+# - description: Install Kubernetes Addons
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: true
+
+# - description: Check contrail status
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@opencontrail:database' cmd.run contrail-status
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
+
+# - description: Install Kubernetes components
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@kubernetes:pool' state.sls kubernetes.pool
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 5, delay: 60}
+#   skip_fail: false
+
+# # NOTE(vryzhenkin): There is nothing to setup at this model
+# #- description: Setup etcd server on primary controller
+# #  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# #     -C 'I@kubernetes:master and *01*' state.sls etcd.server.setup
+# #  node_name: {{ HOSTNAME_CFG01 }}
+# #  retry: {count: 1, delay: 5}
+# #  skip_fail: false
+
+# - description: Run Kubernetes master without setup
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#      -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 3, delay: 5}
+#   skip_fail: true
+
+# - description: Run Kubernetes master setup
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#      -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: true
+
+# - description: Restart Kubelet
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#     -C 'I@kubernetes:pool' service.restart 'kubelet'
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: true
+
+# - description: Renew hosts file on the whole cluster
+#   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '*' state.sls linux.network.host;
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 5}
+#   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
new file mode 100644
index 0000000..eb9d76f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -0,0 +1,93 @@
+nodes:
+    cfg01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+    # Physical nodes
+
+    kvm01.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: single_ctl
+
+    kvm02.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: single_ctl
+
+    kvm03.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: single_ctl
+
+    # prx01.bm-mcp-pike-k8s-contrail.local:
+    #   reclass_storage_name: kubernetes_proxy_node01
+    #   roles:
+    #   - kubernetes_proxy
+    #   # - infra_proxy
+    #   # - stacklight_proxy
+    #   - salt_master_host
+    #   - linux_system_codename_xenial
+    #   interfaces:
+    #     enp9s0f0:
+    #       role: single_mgm
+    #       deploy_address: 172.17.41.8
+    #     enp9s0f1:
+    #       role: single_ctl
+    #       single_address: 10.167.8.81
+
+    cmp001.bm-mcp-pike-k8s-contrail.local:
+      reclass_storage_name: kubernetes_compute_node001
+      roles:
+      - linux_system_codename_xenial
+      - kubernetes_compute_contrail
+      - salt_master_host
+        #- features_lvm_backend
+      interfaces:
+        enp9s0f0:
+          role: single_dhcp
+        ens11f0:
+          role: bond0_ab_contrail
+          tenant_address: 192.168.0.101
+        ens11f1:
+          role: single_ctl
+          single_address: 10.167.8.101
+
+    # cmp002.bm-mcp-pike-k8s-contrail.local:
+    #   reclass_storage_name: kubernetes_compute_node02
+    #   roles:
+    #   - features_lvm_backend
+    #   - linux_system_codename_xenial
+    #   - kubernetes_compute_contrail
+    #   interfaces:
+    #     enp2s0f1:
+    #       role: single_mgm
+    #       deploy_address: 172.16.49.74
+    #     enp5s0f0:
+    #       role: bond0_ab_contrail
+    #       tenant_address: 192.168.0.102
+    #     enp5s0f1:
+    #       role: single_vlan_ctl
+    #       single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
new file mode 100644
index 0000000..b1a3be5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -0,0 +1,200 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAskZyhTySYWvGrp+dlv+q2La8oiM8Sv1JfQog8+UW28hGUbCq
+    PnWa7bzVqENEY+g+qbQYh2Pvb2xURyY9+02TWLmCYzq7+orO1eG2SDt384YzDc6B
+    nQohUbGwaSH2YsT/QA8KV1fvUW9P0gVEHmJWR8Jo3vdRyw+tEUszYkiTYkcVc9zt
+    O5eYT9a6gSjLhl+tmJXu38jdWTcE8zJP+OGpJRDJpV2EAYwv+LXde9REv4KCGMqN
+    lvli9IA+zarfMsAzSTPpL5ooslB20akKM1h5li3LG47OpKMG9sMVFGEEo7+lqTGa
+    zUJEUPbJU95PqLfxoIOjYF/MhWKU5VALLraVwwIDAQABAoIBAHUAj0CqlFej6G3L
+    DB6CBZrds8el4HhG6+hIsX/gprPwKVaL3+/GN7w35vHb1BLN5fJy5HZXPFmge1+G
+    1b8NFttwRQbjEWRJnZ352Sxm+z60oOU61w4+C8gWGnWleJMyP2PHez3/1G1Z5MUt
+    95sJZx8JlNJg9ymSTD/BXyPuBezFKf8jUSgPbhBv8B2yy82YGzqc9u7sK6BN90P1
+    3ZcynQ4cfieZLoim56dF9YEixr8plGmGpOspPZFlVCGIc1y2BC4ZUyDatcCa7/gQ
+    3aDdt9lkEfoCHezAFOmaZDCOZ70spkwCqXYk42BXpDjKF6djCXyx3WKVF+IhEOYT
+    /S1I8KECgYEA1tzUZgW43/Z7Sm+QnoK3R9hG2oZZOoiTDdHyC+f5dk6maNbJbivM
+    FAPIpzHtCyvBEiSgGmlqpUrwR2NbYnOHjOX72Yq7/e0Vl1XWmplKCsTDNFGXx5Fs
+    9AQbWjOF+drgfZ5p3dNyE9689nJZg5EhTxL8dfwnZat/l+/OKFO2tM0CgYEA1GhW
+    4FMsXe3/g7M0dj5FnbS8xjz93pei5YZanID9mY/RUcKbegdtfvtwsfkZe5trbI39
+    jv96QyJeAnf48UDFwCV6SSZF/8Ez0pYHZanwkwKsli5uZdvSz7rUyVwE6tyO24WA
+    Trgpmbb8uxhJHBNuD+bC/iGd1H0IUuJ65ChD9M8CgYEAxfp2z4boQZ2ZHw5LoHLr
+    tIyJROPUJgYgEfToZaYbC7MOzL1Un2pFwg92fPCY7dkkLraGu690r9esLOtVEhNH
+    zEFB3cJi1Gf3pBlZA9zJB8Ej6Pphs2bBkNqT5XpiMcZHYhhsjhQ+Iibz0NWuu3cn
+    zPe+nmx4VMtAZ1x0hl4UlOUCgYBh8NaWS2O60AIwrRI0r5QUGwGsevUojU0Mfr4L
+    SiMlir4e8BgW1ZP0qGYXel/4sfp/rJ1NMZojmJY2lzFPtOk6J03SqvY97s1BffJd
+    O1X1w5bnDPPUvd7f8CsryeVuklLBADbglWSBP3IbpyAW9RKb/HDPE5seHqUW6t11
+    lOd42wKBgBW0tTV6+aqao6o4ZBU0SVoNg9/PwgOubAzeSDW2+ArXn1sMmroSfXVw
+    fbUTJI5TF/1pd7A5AKoR1FfTqlssoYlMGEPI6HJ4n9/4SqLpwd99HFW0ISt+EUMh
+    Tqt9mDfKzwHxG2QTuOwyrslO8iTwRoN5OYgm4dsL471Obr4DILTz
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyRnKFPJJha8aun52W/6rYtryiIzxK/Ul9CiDz5RbbyEZRsKo+dZrtvNWoQ0Rj6D6ptBiHY+9vbFRHJj37TZNYuYJjOrv6is7V4bZIO3fzhjMNzoGdCiFRsbBpIfZixP9ADwpXV+9Rb0/SBUQeYlZHwmje91HLD60RSzNiSJNiRxVz3O07l5hP1rqBKMuGX62Yle7fyN1ZNwTzMk/44aklEMmlXYQBjC/4td171ES/goIYyo2W+WL0gD7Nqt8ywDNJM+kvmiiyUHbRqQozWHmWLcsbjs6kowb2wxUUYQSjv6WpMZrNQkRQ9slT3k+ot/Ggg6NgX8yFYpTlUAsutpXD
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.8.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.8.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.8.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.8.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpgIBAAKCAQEAxLQy4F7sNBloj0fFvklCq9+IX/BV5OBs6OtSBf6A+ztTs68i
+    ib5W6Tz/knh7wt2URB6uKJJBRBK+Oqj91ZNJxISewP2f5gX10WpjOAti+Fghkemt
+    kvyA8aUxX1wuAz7Y2v1uv1IkfWa5ubL8oJXNjeT9yeCNJWBxvd46XT9UiCs5CnDL
+    lBjRg+AP2+u5OabUFtH7GSzVqcMzhi0qLilP+cRhKmar2tQXFEI5wnwADh0REAF/
+    OxUZPaPEPD9TW7fGxjfrMtyUKqTEbi+EPsIvldkR0IhYrKXjwcFFu3FKISuy8PVM
+    EKUM5aZaLMI/WiMs1zmx+bAOrkCsUAf+sVmocQIDAQABAoIBAQCRnSAojrxmmQSZ
+    RLVy9wK+/zwSYXUEeMrG5iUTQOM0mCePVa/IrjnicYB0anKbv7IZd2jPqe1cuk9O
+    V3mJGH68Vry6+0XaX0EpJIuMmolKdNttC8Ktk/TUbciN4kxBpM2d14ybXvCaUGhe
+    usxfCGZhi0oAnxV9vNaWiqNEEjS+k4u9XTnj3+GxstEwch+l7xJwz83WEsx7G1Zz
+    3Yxg7mh2uRPVCOZGVdClciym+9WHHrcdYw/OJCsSFsT7+qgzptsvXBVxa6EuGaVY
+    Pba+UfOnYIKlBtka4i3zXGaCQF6t2FHw5WyUEmYm3iBYmrGBbEf+3665Kh4NQs0a
+    PV4eHlLdAoGBAO8nDgkTA4gi1gyFy2YBUFP2BignkKCZGHkD8qvBnOt1Rxm6USlZ
+    7GzAtU3nSd8ODzgOBI7+zd82yRqv2hEwP7xARhr0Nx1XvyaQtRlQ6tQnBgvqLDCG
+    n0qvWoBM+Yl6sTRGYavAMCaR7PuULUcZFNWk7m0fv4vqddGijgRsje37AoGBANKP
+    nN72BujsQIhdzAYS+u5/Hxu56Tvgupe6kWkhQuV8MQcM+79I6cgJxxH6zQDP/hGt
+    3vXapgWUgi025LuEUWfkxAtTUfT4cRP2x529CH/XLQMYVqWxkoben9r+eFav+Kgw
+    C0dR3vSOlEMzYoIF+p/km0mIV1ZKZvrWymtXSdODAoGBAL4feUwDfqpKr4pzD1l/
+    r+Gf1BM2KQdTzp3eYpzjJiIWMTkl4wIRyCBJL5nIRvT6E2VH153qubY7srLxnFZP
+    2kuJeXJSNkKwkHlTT3XZ22Zfw7HTL+BAFgDk2PjouPTvwlLBpUJKXr07A4CZs0kz
+    ilmybg340GmmMpY/OdIQjuDjAoGBAMcd5hP2hqxHlfMe+CwaUM+uySU4FvZ32xxW
+    4uGPAarwWZC4V20Zr3JqxKUdDjYhersPOFQ4c129hajqSz2EsFLWRAoNvegx9QUT
+    Dsv9EgeK3Vca8f14wf7mkjbPA8++UyABZvkH1BZiqpQuCI66xrnjvnG4DBde/qlg
+    60S84+SvAoGBAKH1feNtJaNhDxF0OqRuVmSFyL3pkMDoYr/mgpT4T1ToRBW5AtEt
+    Io4egi68ph8IChAt/TGFvikW7tbEgK9ACD/RAfl+LiuhxqJJFtC1LfGfHI7ntuRj
+    DjQrUy59ULoflh3iWBPtpw2ooRlSrAwaIgGt9odMECXp3BK8WLsUG9H1
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEtDLgXuw0GWiPR8W+SUKr34hf8FXk4Gzo61IF/oD7O1OzryKJvlbpPP+SeHvC3ZREHq4okkFEEr46qP3Vk0nEhJ7A/Z/mBfXRamM4C2L4WCGR6a2S/IDxpTFfXC4DPtja/W6/UiR9Zrm5svyglc2N5P3J4I0lYHG93jpdP1SIKzkKcMuUGNGD4A/b67k5ptQW0fsZLNWpwzOGLSouKU/5xGEqZqva1BcUQjnCfAAOHREQAX87FRk9o8Q8P1Nbt8bGN+sy3JQqpMRuL4Q+wi+V2RHQiFispePBwUW7cUohK7Lw9UwQpQzlploswj9aIyzXObH5sA6uQKxQB/6xWahx
+  cluster_domain: bm-mcp-pike-k8s-contrail.local
+  cluster_name: bm-mcp-pike-k8s-contrail
+  # compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: GAZbu3hguubkeYMg15AQ1J6UuY60TAh8h0EVpNUrHWhjRS2SxRYBuxxLvVURv82m
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2410'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.17.41.2
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 172.17.41.2
+  dns_server02: 172.17.41.2
+  email_address: dtyzhnenko@mirantis.com
+  etcd_ssl: 'True'
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.17.41.4
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.17.41.5
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.17.41.6
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'True'
+  kubernetes_compute_count: 1
+  kubernetes_compute_rack01_single_subnet: 10.167.8
+  kubernetes_compute_rack01_tenant_subnet: 192.168.0
+  kubernetes_network_opencontrail_enabled: 'True'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_deploy_range_end: 10.0.0.254
+  maas_deploy_range_start: 10.0.0.1
+  maas_deploy_vlan: '0'
+  maas_fabric_name: fabric-0
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: 10.167.8.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: 10.167.8.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: 10.167.8.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: 10.167.8.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: 10.167.8.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: 10.167.8.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: 10.167.8.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: 10.167.8.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: 10.167.8.100
+  opencontrail_router01_hostname: rtr01
+  opencontrail_router02_address: 10.167.8.101
+  opencontrail_router02_hostname: rtr02
+  opencontrail_version: '4.0'
+  openstack_enabled: 'False'
+  openssh_groups: ''
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_smtp_use_tls: 'False'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_host: 127.0.0.1
+  oss_pushkin_smtp_port: '587'
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: kubernetes_enabled
+  public_host: ${_param:infra_config_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: frJMLJsfGkFXCk4qTTEttKla518Akvdp
+  salt_api_password_hash: $6$ixefPtcd$vasKuySO6L2JM0FKaB/udsQvH4upI2dWoJZaR9XTqeAx4UMvkHsNNSwsALVqTTLbXIngkFqYNXpbNm1o4iMGS.
+  salt_master_address: 172.17.41.3
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.17.41.3
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.8.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: influxdb
+  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2411'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
new file mode 100644
index 0000000..63f07b5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
@@ -0,0 +1,209 @@
+nodes:
+    # Virtual Control Plane nodes
+
+    ctl01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: kubernetes_control_node01
+      roles:
+      - kubernetes_control_contrail
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: kubernetes_control_node02
+      roles:
+      - kubernetes_control_contrail
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: kubernetes_control_node03
+      roles:
+      - kubernetes_control_contrail
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: kubernetes_proxy_node01
+      roles:
+      - kubernetes_proxy
+      # - infra_proxy
+      # - stacklight_proxy
+      - salt_master_host
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: kubernetes_proxy_node02
+      roles:
+      - kubernetes_proxy
+      # - infra_proxy
+      # - stacklight_proxy
+      - salt_master_host
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.31
+
+    nal02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.32
+
+    nal03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.33
+
+    ntw01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.21
+
+    ntw02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.22
+
+    ntw03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_ctl
+          single_address: 10.167.8.23
+
+    mtr01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log01.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log02.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log03.cookied-bm-mcp-ocata-contrail.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+#    bmk01.cookied-bm-mcp-ocata-contrail.local:
+#      reclass_storage_name: openstack_benchmark_node01
+#      roles:
+#      - openstack_benchmark
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
new file mode 100644
index 0000000..7e46564
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -0,0 +1,189 @@
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CMP001 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import ETH0_IP_ADDRESS_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import ETH0_IP_ADDRESS_CMP001 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-mcp-pike-k8s-contrail') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-upgrade-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
+
+# - description: "Registration cmp001 node"
+#   cmd: |
+#     salt-call event.send "reclass/minion/classify" \
+#       "{\"node_master_ip\": \"{{ ETH0_IP_ADDRESS_CFG01 }}\", \
+#       \"node_deploy_ip\": \"{{ ETH0_IP_ADDRESS_CMP001 }}\", \
+#       \"node_control_ip\": \"10.167.8.101\", \
+#       \"node_tenant_ipcontrol_ip\": \"10.167.8.101\", \
+#       \"node_os\": \"xenial\", \
+#       \"node_domain\": \"{{ DOMAIN_NAME }}\", \
+#       \"node_cluster\": \"{{ LAB_CONFIG_NAME }}\",
+#       \"node_hostname\": \"$(hostname -s)\"}"
+#   node_name: {{ HOSTNAME_CMP001 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
+#
+- description: "Workaround for rack01 compute generator"
+  cmd: |
+    set -e;
+    # Remove rack01 key
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
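+
+# NOTE: a minimal sketch of the reclass-tools usage these workarounds rely on
+# (the key paths below are illustrative placeholders, not taken from the model):
+#   reclass-tools del-key parameters.reclass.storage.node.<node_key> <file>.yml
+#   reclass-tools add-key parameters._param.<key> '<value>' <file>.yml
+# Both subcommands edit the given reclass YAML file in place.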
+
+# - description: "WR for changing image to proposed"
+#   cmd: |
+#     set -e;
+#     # Point the salt_control images to the proposed repository
+#     . /root/venv-reclass-tools/bin/activate;
+#     reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+#     reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
+
+- description: "Workaround for xenial images"
+  cmd: |
+    set -e;
+    # Pin the xenial salt_control image for the OpenContrail and proxy VCP nodes
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.nal01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.nal02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.nal03.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.ntw03.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.prx01.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    reclass-tools add-key parameters.salt.control.cluster.internal.node.prx02.image '${_param:salt_control_xenial_image}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
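+
+# A quick, hedged sanity check for the step above using plain grep:
+#   grep -A1 'nal01' /srv/salt/reclass/classes/cluster/<LAB_CONFIG_NAME>/infra/kvm.yml
+# should now show the node's image key set to ${_param:salt_control_xenial_image}.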
+
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+
+- description: "Workaround for PROD-14060"
+  cmd: |
+    set -e;
+    # Set deploy, tenant and single addresses for the compute node
+    salt-call reclass.cluster_meta_set deploy_address 172.17.41.7 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+    salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+    salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+
+    # salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+    # salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+    # salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
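+
+# The generated node file can be inspected afterwards to confirm the addresses:
+#   grep -E 'deploy_address|tenant_address|single_address' \
+#     /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml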
+
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Temporary workaround to align bridge names with the environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Create VMs for control plane
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: '*Workaround*: wait until the control-plane VMs appear in salt-key (instead of a fixed sleep)'
+  cmd: |
+    salt-key -l acc | sort > /tmp/current_keys.txt &&
+    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort | xargs -I {} fgrep {} /tmp/current_keys.txt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 20, delay: 30}
+  skip_fail: false
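+
+# The check above passes only once every VM name reported by 'virsh list --name'
+# on the kvm nodes is present in the accepted salt keys; with retry
+# {count: 20, delay: 30} it polls for up to ~10 minutes instead of a fixed sleep.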
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for the generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
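+
+# 'reclass-salt --top' renders the top data for every node under the given
+# inventory path; a failure here usually indicates a broken class reference in
+# the generated model rather than a salt problem.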
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
similarity index 64%
rename from tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
rename to tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
index f4b05d0..4045fe8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
@@ -1,6 +1,6 @@
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install docker swarm.
+# Install docker swarm
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -56,16 +56,17 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Check the VIP on StackLight mon nodes
+- description: Check the VIP on mon nodes
   cmd: |
-    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
 # Install slv2 infra
+# Launch containers
 - description: Install Mongo if target matches
   cmd: |
     if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
@@ -84,6 +85,18 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Launch Prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
@@ -99,12 +112,6 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Configure fluentd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 - description: Install elasticsearch server
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
@@ -140,6 +147,45 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+# Install Prometheus LTS (optional, if set in the model)
+- description: Install Prometheus LTS (optional, if set in the model)
+  cmd: |
+    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Prometheus relay service presence: ${PROMETHEUS_SERVICE}";
+    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+  cmd: |
+    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+    else
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer_collector if it exists
+  cmd: |
+    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Ceilometer service presence: ${CEILO}";
+    if [[ "$CEILO" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
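+
+# The three conditional steps above share one gating pattern (sketch; <pillar>
+# and <state> are placeholders):
+#   SVC=`salt -C 'I@<pillar>' test.ping 1>/dev/null 2>&1 && echo true`;
+#   if [[ "$SVC" == "true" ]]; then salt -C 'I@<pillar>' state.sls <state>; fi
+# i.e. a state is applied only when at least one minion matches its pillar.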
+
 # Collect grains needed to configure the services
 
 - description: Get grains
@@ -157,30 +203,49 @@
 - description: Update mine
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 5, delay: 15}
   skip_fail: false
 
 # Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: run docker state
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 120}
-  skip_fail: false
-
-- description: docker ps
-  cmd: sleep 120; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+- description: Configure prometheus in docker swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install sphinx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+#- description: Install prometheus alertmanager
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: run docker state
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+#
+#- description: docker ps
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
 - description: Configure Grafana dashboards and datasources
-  cmd: sleep 60;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
@@ -190,4 +255,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
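+  # {hostname} is a format placeholder; it is assumed to be substituted per node
+  # when the cloud-init image for each node is generated.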
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..646af7a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -0,0 +1,103 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - apt-get update
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
new file mode 100644
index 0000000..a8981c7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
@@ -0,0 +1,104 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   # - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   # Install latest kernel
+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+   # Register compute node in salt master
+   # - salt-call event.send "reclass/minion/classify" "{{ "{{" }}\"node_master_ip\": \"{{ ETH0_IP_ADDRESS_CFG01 }}\", \"node_os\": \"xenial\", \"node_domain\": \"{{ DOMAIN_NAME }}\", \"node_cluster\": \"{{ LAB_CONFIG_NAME }}\"{{ "}}" }}"
+
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
new file mode 100644
index 0000000..bb7056a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
@@ -0,0 +1,95 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   #   - apt-get clean
+   #   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #   - echo "Allow SSH access ..."
+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
new file mode 100644
index 0000000..ef8c4f1
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
@@ -0,0 +1,538 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-pike-k8s-contrail') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{#
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+#}
+{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '172.17.41.3') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.17.41.4') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.17.41.5') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.17.41.6') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.17.41.7') %}
+{#
+# {% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
+#}
+{#
+# {% set ETH0_IP_ADDRESS_PRX01 = os_env('ETH0_IP_ADDRESS_PRX01', '172.17.41.8') %}
+# {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+#}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe_cmp {{ CLOUDINIT_USER_DATA_HWE_CMP }}
+
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-pike-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.17.41.0/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +61
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            #default_{{ HOSTNAME_PRX01 }}: {{ ETH0_IP_ADDRESS_PRX01 }}
+
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            #virtual_{{ HOSTNAME_PRX01 }}: {{ ETH0_IP_ADDRESS_PRX01 }}
+          #ip_ranges:
+          #    dhcp: [+2, -4]
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.13.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.14.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.41.128/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
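+
+      # ip_reserved sketch: '+N' offsets count from the start of the pool's
+      # network and '-N' from its end; e.g. in 172.17.41.0/26, 'gateway: +1'
+      # resolves to 172.17.41.1 and 'l2_network_device: +61' to 172.17.41.61.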
+
+    groups:
+
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+                #- label: ens4
+                #  l2_network_device: private
+                #  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                #ens4:
+                #  networks:
+                #    - private
+
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
+                                            # to access Ironic API
+            # Agent URL that is accessible from deploying node when nodes
+            # are bootstrapped with PXE. Usually PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+
+        #  - name: {{ HOSTNAME_CFG01 }}
+        #    role: salt_master
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # The same as for agent URL, here is an URL to the image that should be
+        #          # used for deploy the node. It should also be accessible from deploying
+        #          # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # Volume with name 'iso' will be used
+        #                     # for store image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data_cfg01
+
+        #      interfaces:
+        #        - label: enp3s0f0  # Infra interface
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+        #        - label: enp3s0f1
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+        #      network_config:
+        #        enp3s0f0:
+        #          networks:
+        #           - infra
+        #        enp3s0f1:
+        #          networks:
+        #           - admin
+          # - name: {{ HOSTNAME_PRX01 }}
+          #   role: salt_minion
+          #   params:
+          #     ipmi_user: !os_env IPMI_USER
+          #     ipmi_password: !os_env IPMI_PASSWORD
+          #     ipmi_previlegies: OPERATOR
+          #     ipmi_host: !os_env IPMI_HOST_PRX01  # hostname or IP address
+          #     ipmi_lan_interface: lanplus
+          #     ipmi_port: 623
+
+          #     root_volume_name: system     # see 'volumes' below
+          #     cloud_init_volume_name: iso  # see 'volumes' below
+          #     cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+          #     volumes:
+          #       - name: system
+          #         capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          #         # The same as for agent URL, here is an URL to the image that should be
+          #         # used for deploy the node. It should also be accessible from deploying
+          #         # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
+          #         source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+          #         source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+          #       - name: iso  # Volume with name 'iso' will be used
+          #                    # for store image with cloud-init metadata.
+
+          #         cloudinit_meta_data: *cloudinit_meta_data
+          #         cloudinit_user_data: *cloudinit_user_data
+
+          #     interfaces:
+          #       - label: enp9s0f0
+          #         l2_network_device: admin
+          #         mac_address: !os_env ETH0_MAC_ADDRESS_PRX01
+          #       - label: enp9s0f1
+          #         mac_address: !os_env ETH1_MAC_ADDRESS_PRX01
+
+          #     network_config:
+          #       enp9s0f0:
+          #         networks:
+          #          - admin
+          #       bond0:
+          #         networks:
+          #          - control
+          #         aggregation: active-backup
+          #         parents:
+          #          - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image used to deploy the
+                  # node. It must also be accessible from the deploying node when nodes
+                  # are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image used to deploy the
+                  # node. It must also be accessible from the deploying node when nodes
+                  # are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is a URL to the image used to deploy the
+                  # node. It must also be accessible from the deploying node when nodes
+                  # are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                # - label: eno1
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                # - label: eno2
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                # eno1:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+
+          - name: {{ HOSTNAME_CMP001 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URL, this is the URL of the image that should be
+                  # used to deploy the node. It must also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe_cmp
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+                # - label: enp5s0f0
+                #   mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                # - label: enp5s0f1
+                #   mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: enp5s0f2
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+
+
+          # - name: {{ HOSTNAME_CMP002 }}
+          #   role: salt_minion
+          #   params:
+          #     ipmi_user: !os_env IPMI_USER
+          #     ipmi_password: !os_env IPMI_PASSWORD
+          #     ipmi_previlegies: OPERATOR
+          #     ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+          #     ipmi_lan_interface: lanplus
+          #     ipmi_port: 623
+
+          #     root_volume_name: system     # see 'volumes' below
+          #     cloud_init_volume_name: iso  # see 'volumes' below
+          #     # cloud_init_iface_up: eno1  # see 'interfaces' below.
+          #     cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+          #     volumes:
+          #       - name: system
+          #         capacity: !os_env NODE_VOLUME_SIZE, 200
+
+          #         # As with the agent URL, this is the URL of the image that should be
+          #         # used to deploy the node. It must also be accessible from the deploying
+          #         # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+          #         source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+          #         source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+          #       - name: iso  # The volume named 'iso' will be used
+          #                    # to store the image with cloud-init metadata.
+
+          #         cloudinit_meta_data: *cloudinit_meta_data
+          #         cloudinit_user_data: *cloudinit_user_data_hwe
+
+          #     interfaces:
+          #       # - label: eno1
+          #       - label: enp2s0f0
+          #         mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+          #       # - label: eth0
+          #       - label: enp2s0f1
+          #         l2_network_device: admin
+          #         mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+          #       # - label: eth3
+          #       - label: enp5s0f0
+          #         mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+          #         features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+          #       # - label: eth2
+          #       - label: enp5s0f1
+          #         mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+          #         features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+          #       # - label: eth4
+          #       #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+          #       #   features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+          #     network_config:
+          #       enp2s0f1:
+          #         networks:
+          #          - admin
+          #       bond0:
+          #         networks:
+          #          - control
+          #         aggregation: active-backup
+          #         parents:
+          #          - enp5s0f0
+          #          - enp5s0f1
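
The `!os_env` tag used throughout these underlay templates resolves a value from the process environment, with an optional default after the comma (e.g. `capacity: !os_env NODE_VOLUME_SIZE, 200`). A minimal PyYAML sketch of such a constructor, assuming this comma-separated-default semantics (illustrative only, not the actual fuel-devops implementation):

    import os
    import yaml

    def os_env_constructor(loader, node):
        # Scalar form is "VAR_NAME" or "VAR_NAME, default"
        raw = loader.construct_scalar(node)
        parts = [p.strip() for p in raw.split(',', 1)]
        value = os.environ.get(parts[0], parts[1] if len(parts) > 1 else None)
        if value is None:
            raise ValueError('Environment variable %r is not set' % parts[0])
        return value

    yaml.SafeLoader.add_constructor('!os_env', os_env_constructor)

    print(yaml.safe_load('capacity: !os_env NODE_VOLUME_SIZE, 200'))
    # -> {'capacity': '200'} when NODE_VOLUME_SIZE is unset
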
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml
rename to tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/core.yaml
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index e055d78..7d453c1 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -1,4 +1,5 @@
 default_context:
+  auditd_enabled: 'False'
   backup_private_key: |-
     -----BEGIN RSA PRIVATE KEY-----
     MIIEowIBAAKCAQEAtyCfiXxwB6Dk6n7Y1t9u2XqMkLPvMArKwRUWGEwTzS7w0NzY
@@ -108,12 +109,11 @@
   internal_proxy_enabled: 'False'
   kqueen_custom_mail_enabled: 'False'
   kqueen_enabled: 'False'
-  kubernetes_compute_node01_address: 10.167.4.101
-  kubernetes_compute_node01_deploy_address: 10.167.5.101
-  kubernetes_compute_node01_hostname: cmp01
-  kubernetes_compute_node02_address: 10.167.4.102
-  kubernetes_compute_node02_deploy_address: 10.167.5.102
-  kubernetes_compute_node02_hostname: cmp02
+  kubernetes_compute_count: 2
+  kubernetes_compute_rack01_deploy_subnet: 10.167.5
+  kubernetes_compute_rack01_single_subnet: 10.167.4
+  kubernetes_compute_rack01_tenant_subnet: 10.167.6
+  kubernetes_compute_rack01_hostname: cmp
   kubernetes_control_address: 10.167.4.10
   kubernetes_control_node01_address: 10.167.4.11
   kubernetes_control_node01_deploy_address: 10.167.5.11
@@ -131,10 +131,10 @@
   kubernetes_virtlet_enabled: 'False'
   local_repositories: 'False'
   maas_deploy_address: 10.167.5.15
-  maas_deploy_range_end: 10.167.5.254
-  maas_deploy_range_start: 10.167.5.1
+  maas_deploy_range_end: 10.167.5.199
+  maas_deploy_range_start: 10.167.5.180
   maas_deploy_vlan: '0'
-  maas_fabric_name: fabric-0
+  maas_fabric_name: deploy-fabric0
   maas_hostname: cfg01
   mcp_common_scripts_branch: ''
   mcp_version: proposed
@@ -148,6 +148,8 @@
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
   oss_webhook_app_id: '24'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_port: '587'
   oss_webhook_login_id: '13'
   platform: kubernetes_enabled
   public_host: ${_param:infra_config_address}
@@ -158,7 +160,7 @@
   salt_master_address: 10.167.4.15
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: ''
+  shared_reclass_branch: 'proposed'
   shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.4.60
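
This hunk replaces the per-node compute parameters with a rack-based generator: `kubernetes_compute_count` plus the `rack01` subnet prefixes and hostname prefix. A sketch of how such parameters could expand into per-node values (the `.101`/`.102` host offsets match the removed per-node addresses; the actual expansion is done by the cookiecutter templates):

    count = 2
    prefix = 'cmp'
    subnets = {'deploy': '10.167.5', 'single': '10.167.4', 'tenant': '10.167.6'}
    for i in range(1, count + 1):
        hostname = '%s%03d' % (prefix, i)  # cmp001, cmp002
        addresses = {k: '%s.%d' % (net, 100 + i) for k, net in subnets.items()}
        print(hostname, addresses)
    # cmp001 {'deploy': '10.167.5.101', 'single': '10.167.4.101', 'tenant': '10.167.6.101'}
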
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
index 71b3e8d..d0f69b7 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
@@ -1,85 +1,17 @@
+@Library('tcp-qa')_
+
 common = new com.mirantis.mk.Common()
-
-def run_cmd(cmd, returnStdout=false) {
-    common.printMsg("Run shell command:\n" + cmd, "blue")
-    def VENV_PATH='/home/jenkins/fuel-devops30'
-    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}';. ${VENV_PATH}/bin/activate; bash -c 'set -ex;set -ex;${cmd.stripIndent()}'"
-    return sh(script: script, returnStdout: returnStdout)
-}
-
-def run_cmd_stdout(cmd) {
-    return run_cmd(cmd, true)
-}
+shared = new com.mirantis.system_qa.SharedPipeline()
 
 node ("${NODE_NAME}") {
   try {
 
-    stage("Clean the environment") {
-        println "Clean the working directory ${env.WORKSPACE}"
-        deleteDir()
-        // do not fail if environment doesn't exists
-        println "Remove environment ${ENV_NAME}"
-        run_cmd("""\
-            dos.py erase ${ENV_NAME} || true
-        """)
-        println "Remove config drive ISO"
-        run_cmd("""\
-            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
-        """)
+    stage("Clean the environment and clone tcp-qa") {
+        shared.prepare_working_dir()
     }
 
-    stage("Clone tcp-qa project and install requirements") {
-        run_cmd("""\
-        git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
-        #cd tcp-qa
-        if [ -n "$TCP_QA_REFS" ]; then
-            set -e
-            git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
-        fi
-        pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
-        """)
-    }
-
-    // load shared methods from the clonned tcp-qa repository.
-    // DO NOT MOVE this code before clonning the repo
-    def rootDir = pwd()
-    def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
-
-    stage("Create an environment ${ENV_NAME} in disabled state") {
-        // do not fail if environment doesn't exists
-        run_cmd("""\
-        python ./tcp_tests/utils/create_devops_env.py
-        """)
-    }
-
-    stage("Generate the model") {
-        shared.generate_cookied_model()
-    }
-
-    stage("Generate config drive ISO") {
-        shared.generate_configdrive_iso()
-    }
-
-    stage("Upload generated config drive ISO into volume on cfg01 node") {
-        run_cmd("""\
-        virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
-        virsh pool-refresh --pool default
-        """)
-    }
-
-    stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
-        run_cmd("""\
-        export MANAGER=devops
-        export SHUTDOWN_ENV_ON_TEARDOWN=false
-        export BOOTSTRAP_TIMEOUT=900
-        export PYTHONIOENCODING=UTF-8
-        export REPOSITORY_SUITE=${MCP_VERSION}
-        #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
-        export TEST_GROUP=test_install_local_salt
-        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
-        sleep 60  # wait for jenkins to start and IO calm down
-
-        """)
+    stage("Create environment, generate mode, bootstrap the salt-cluster") {
+        shared.swarm_bootstrap_salt_cluster_devops()
     }
 
     // Install core and cicd
@@ -92,20 +24,20 @@
     }
 
     // Install the cluster
-    for (stack in "${STACK_INSTALL}".split(",")) {
+    for (stack in "${PLATFORM_STACK_INSTALL}".split(",")) {
         stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
             shared.run_job_on_cicd_nodes(stack)
         }
     }
 
     stage("Run tests") {
-        run_cmd("""\
+        shared.run_cmd("""\
             export ENV_NAME=${ENV_NAME}
             . ./tcp_tests/utils/env_salt
             . ./tcp_tests/utils/env_k8s
 
             # Initialize variables used in tcp-qa tests
-            export CURRENT_SNAPSHOT=sl_deployed  # provide the snapshot name required by the test
+            export CURRENT_SNAPSHOT=stacklight_deployed  # provide the snapshot name required by the test
             export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separately
 
             export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
@@ -114,9 +46,9 @@
             export salt_master_port=6969
             export SALT_USER=\$SALTAPI_USER
             export SALT_PASSWORD=\$SALTAPI_PASS
-            export COMMON_SERVICES_INSTALLED=true  # skip common_services_deployed fixture
+            export CORE_INSTALLED=true  # skip core_deployed fixture
             export K8S_INSTALLED=true              # skip k8s_deployed fixture
-            export sl_installed=true              # skip sl_deployed fixture
+            export sl_installed=true              # skip stacklight_deployed fixture
 
             py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico_sl
 
@@ -133,7 +65,7 @@
   } finally {
     // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
     // and report appropriate data to TestRail
-    run_cmd("""\
+    shared.run_cmd("""\
         dos.py destroy ${ENV_NAME}
     """)
   }
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index 248b42d..2b89364 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -109,8 +109,9 @@
         ens4:
           role: single_ctl
 
-    cmp01:
-      reclass_storage_name: kubernetes_compute_node01
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: kubernetes_compute_rack01
       roles:
       - kubernetes_compute
       - linux_system_codename_xenial
@@ -120,20 +121,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-          single_address: ${_param:kubernetes_compute_node01_address}
-
-    cmp02:
-      reclass_storage_name: kubernetes_compute_node02
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-          single_address: ${_param:kubernetes_compute_node02_address}
 
     mon01:
       reclass_storage_name: stacklight_server_node01
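
The `cmp<<count>>` node name above is a pattern that the environment-template machinery expands into one entry per compute node, matching the `kubernetes_compute_count` value from the cookiecutter context. A sketch of the assumed expansion (the zero-padded width is an assumption of this example):

    def expand_count_nodes(name, count):
        # 'cmp<<count>>' -> ['cmp001', 'cmp002', ...]
        if '<<count>>' not in name:
            return [name]
        return [name.replace('<<count>>', '%03d' % i)
                for i in range(1, count + 1)]

    print(expand_count_nodes('cmp<<count>>', 2))  # ['cmp001', 'cmp002']
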
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
index 7c3ae61..8a739fc 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
@@ -7,12 +7,6 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
-- description: Workaround, configure ntp and rsyslog on salt master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls ntp,rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index 10c716e..c249522 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -3,29 +3,29 @@
 
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico-sl') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01') %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02') %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03') %}
 
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01') %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02') %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03') %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01') %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02') %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03') %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01') %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02') %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03') %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
 
 {% import 'cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
 {% import 'cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index 693a589..45a0d80 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -1,4 +1,5 @@
 default_context:
+  auditd_enabled: 'False'
   backup_private_key: |-
     -----BEGIN RSA PRIVATE KEY-----
     MIIEpAIBAAKCAQEA3ufjR+Eh/CJp84JZPKosMNL7ydXidfe9qdAnQIGGOsS/TBnc
@@ -109,12 +110,11 @@
   internal_proxy_enabled: 'False'
   kqueen_custom_mail_enabled: 'False'
   kqueen_enabled: 'False'
-  kubernetes_compute_node01_address: 10.167.4.101
-  kubernetes_compute_node01_deploy_address: 10.167.5.101
-  kubernetes_compute_node01_hostname: cmp01
-  kubernetes_compute_node02_address: 10.167.4.102
-  kubernetes_compute_node02_deploy_address: 10.167.5.102
-  kubernetes_compute_node02_hostname: cmp02
+  kubernetes_compute_count: 2
+  kubernetes_compute_rack01_deploy_subnet: 10.167.5
+  kubernetes_compute_rack01_single_subnet: 10.167.4
+  kubernetes_compute_rack01_tenant_subnet: 10.167.6
+  kubernetes_compute_rack01_hostname: cmp
   kubernetes_control_address: 10.167.4.10
   kubernetes_control_node01_address: 10.167.4.11
   kubernetes_control_node01_deploy_address: 10.167.5.11
@@ -132,6 +132,10 @@
   kubernetes_virtlet_enabled: 'False'
   local_repositories: 'False'
   maas_deploy_address: 10.167.5.15
+  maas_deploy_range_end: 10.167.5.199
+  maas_deploy_range_start: 10.167.5.180
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy-fabric0
   maas_hostname: cfg01
   mcp_common_scripts_branch: ''
   mcp_version: proposed
@@ -153,7 +157,7 @@
   salt_master_address: 10.167.4.15
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
-  shared_reclass_branch: ''
+  shared_reclass_branch: 'proposed'
   shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
index 18a0c2c..83335a9 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
@@ -1,85 +1,17 @@
+@Library('tcp-qa')_
+
 common = new com.mirantis.mk.Common()
-
-def run_cmd(cmd, returnStdout=false) {
-    common.printMsg("Run shell command:\n" + cmd, "blue")
-    def VENV_PATH='/home/jenkins/fuel-devops30'
-    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}';. ${VENV_PATH}/bin/activate; bash -c 'set -ex;set -ex;${cmd.stripIndent()}'"
-    return sh(script: script, returnStdout: returnStdout)
-}
-
-def run_cmd_stdout(cmd) {
-    return run_cmd(cmd, true)
-}
+shared = new com.mirantis.system_qa.SharedPipeline()
 
 node ("${NODE_NAME}") {
   try {
 
-    stage("Clean the environment") {
-        println "Clean the working directory ${env.WORKSPACE}"
-        deleteDir()
-        // do not fail if environment doesn't exists
-        println "Remove environment ${ENV_NAME}"
-        run_cmd("""\
-            dos.py erase ${ENV_NAME} || true
-        """)
-        println "Remove config drive ISO"
-        run_cmd("""\
-            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
-        """)
+    stage("Clean the environment and clone tcp-qa") {
+        shared.prepare_working_dir()
     }
 
-    stage("Clone tcp-qa project and install requirements") {
-        run_cmd("""\
-        git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
-        #cd tcp-qa
-        if [ -n "$TCP_QA_REFS" ]; then
-            set -e
-            git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
-        fi
-        pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
-        """)
-    }
-
-    // load shared methods from the clonned tcp-qa repository.
-    // DO NOT MOVE this code before clonning the repo
-    def rootDir = pwd()
-    def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
-
-    stage("Create an environment ${ENV_NAME} in disabled state") {
-        // do not fail if environment doesn't exists
-        run_cmd("""\
-        python ./tcp_tests/utils/create_devops_env.py
-        """)
-    }
-
-    stage("Generate the model") {
-        shared.generate_cookied_model()
-    }
-
-    stage("Generate config drive ISO") {
-        shared.generate_configdrive_iso()
-    }
-
-    stage("Upload generated config drive ISO into volume on cfg01 node") {
-        run_cmd("""\
-        virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
-        virsh pool-refresh --pool default
-        """)
-    }
-
-    stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
-        run_cmd("""\
-        export MANAGER=devops
-        export SHUTDOWN_ENV_ON_TEARDOWN=false
-        export BOOTSTRAP_TIMEOUT=900
-        export PYTHONIOENCODING=UTF-8
-        export REPOSITORY_SUITE=${MCP_VERSION}
-        #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
-        export TEST_GROUP=test_install_local_salt
-        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
-        sleep 60  # wait for jenkins to start and IO calm down
-
-        """)
+    stage("Create environment, generate mode, bootstrap the salt-cluster") {
+        shared.swarm_bootstrap_salt_cluster_devops()
     }
 
     // Install core and cicd
@@ -92,14 +24,14 @@
     }
 
     // Install the cluster
-    for (stack in "${STACK_INSTALL}".split(",")) {
+    for (stack in "${PLATFORM_STACK_INSTALL}".split(",")) {
         stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
             shared.run_job_on_cicd_nodes(stack)
         }
     }
 
     stage("Run tests") {
-        run_cmd("""\
+        shared.run_cmd("""\
             export ENV_NAME=${ENV_NAME}
             . ./tcp_tests/utils/env_salt
             . ./tcp_tests/utils/env_k8s
@@ -114,7 +46,7 @@
             export salt_master_port=6969
             export SALT_USER=\$SALTAPI_USER
             export SALT_PASSWORD=\$SALTAPI_PASS
-            export COMMON_SERVICES_INSTALLED=true  # skip common_services_deployed fixture
+            export CORE_INSTALLED=true  # skip core_deployed fixture
             export K8S_INSTALLED=true              # skip k8s_deployed fixture
 
             py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico
@@ -127,7 +59,7 @@
   } finally {
     // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
     // and report appropriate data to TestRail
-    run_cmd("""\
+    shared.run_cmd("""\
         dos.py destroy ${ENV_NAME}
     """)
   }
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
index 44c773f..66d7cec 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -109,8 +109,9 @@
         ens4:
           role: single_ctl
 
-    cmp01:
-      reclass_storage_name: kubernetes_compute_node01
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: kubernetes_compute_rack01
       roles:
       - kubernetes_compute
       - linux_system_codename_xenial
@@ -120,17 +121,3 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-          single_address: ${_param:kubernetes_compute_node01_address}
-
-    cmp02:
-      reclass_storage_name: kubernetes_compute_node02
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-          single_address: ${_param:kubernetes_compute_node02_address}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
index 1d51907..a7994a8 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
@@ -7,12 +7,6 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
-- description: Workaround, configure ntp and rsyslog on salt master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls ntp,rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index 921be9b..6168b6e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -11,8 +11,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..a5992bf
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -0,0 +1,202 @@
+default_context:
+  auditd_enabled: 'False'
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+  cluster_domain: cookied-cicd-pike-dpdk.local
+  cluster_name: cookied-cicd-pike-dpdk
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.241
+  infra_kvm01_deploy_address: 10.167.5.91
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.242
+  infra_kvm02_deploy_address: 10.167.5.92
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.243
+  infra_kvm03_deploy_address: 10.167.5.93
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_range_end: 10.167.5.199
+  maas_deploy_range_start: 10.167.5.180
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy-fabric0
+  maas_hostname: cfg01
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: cicd
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_control_address: 10.167.4.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.4.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.4.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.4.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.4.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.4.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.4.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.4.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node02_address: 10.167.4.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node03_address: 10.167.4.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.6.8
+  openstack_message_queue_address: 10.167.4.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.4.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.4.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.4.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'True'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_hugepages_count: '2048'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_cpu_pinning: '3'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.4.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.4.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.4.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: 'proposed'
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
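
The `salt_api_password_hash` value above is a SHA-512 crypt string (the `$6$<salt>$<hash>` format). One way to produce a compatible hash for a new context is the standard-library `crypt` module (deprecated since Python 3.11 and removed in 3.13):

    import crypt

    password = 'H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi'  # salt_api_password above
    # METHOD_SHA512 yields the '$6$<salt>$<hash>' format
    print(crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512)))
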
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
new file mode 100644
index 0000000..53f5dd0
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
@@ -0,0 +1,224 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs01:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs02:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs03:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg01:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg02:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg03:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        dpdkport0:
+          role: bond2_dpdk_prv
+          dpdk_pci: "0000:00:05.0"
+        dpdkport1:
+          role: bond2_dpdk_prv
+          dpdk_pci: "0000:00:06.0"
+        ens7:
+          role: bond1_ab_ovs_floating
+
+    gtw01:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_ovs_br_prv
+          mtu: 1500
+        ens7:
+          role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
new file mode 100644
index 0000000..a2d8eb5
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
@@ -0,0 +1,42 @@
+{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+- description: Enable hugepages on cmp nodes
+  cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Workaround for PROD-18834: Pre-install linux-headers package"
+  cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+  cmd: |
+    set -ex;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
+    salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
+    salt 'cmp*' cmd.run "service openvswitch-switch stop";
+    salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
+    salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
+    salt 'cmp*' cmd.run "service openvswitch-switch start";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
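
For scale: reserving 2048 hugepages with the default 2 MiB hugepage size on x86_64 (assumed here) pins about 4 GiB of RAM on each cmp node, so the compute VMs must be sized accordingly:

    pages = 2048        # value echoed into /proc/sys/vm/nr_hugepages above
    page_size_mib = 2   # default x86_64 hugepage size, assumed
    print('%d pages * %d MiB = %d GiB' % (pages, page_size_mib,
                                          pages * page_size_mib // 1024))
    # 2048 pages * 2 MiB = 4 GiB
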
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
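
The `{hostname}` fields in this meta-data template are presumably filled per node when fuel-devops renders the config drive; plain Python `str.format` reproduces the effect:

    meta_data_template = (
        'instance-id: iid-local1\n'
        'hostname: {hostname}\n'
        'local-hostname: {hostname}\n'
    )
    print(meta_data_template.format(hostname='cfg01.cookied-cicd-pike-dpdk.local'))
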
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..4c43578
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml
@@ -0,0 +1,101 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while the node is being prepared
+   - cloud-init-per once sudo touch /is_cloud_init_started
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - echo "******** MOUNT CONFIG DRIVE"
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   #- sudo ifdown ens3
+   #- sudo ip r d default || true  # remove existing default route to get it from dhcp
+   #- sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run user data script from config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+   - rm -f /etc/network/interfaces
+   #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+   #- cp /root/config-drive/user-data /root/user-data
+   #- sed -i '/^reboot$/d' /root/user-data
+   #- set -x; cd /root && /bin/bash -xe ./user-data
+   - |
+     set -x
+     cd /root/config-drive
+     if /bin/bash -xe ./user-data; then
+         touch /is_cloud_init_finished
+     else
+         set +x
+         echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
+     fi
+
+   # Enable root access (after reboot)
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   #- path: /etc/network/interfaces
+   - path: /root/interfaces
+     content: |
+          auto lo
+          iface lo inet loopback
+
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 60
+            ServerAliveCountMax 0
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
new file mode 100644
index 0000000..69d4a42
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
@@ -0,0 +1,816 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-cicd-pike-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-pike-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-pike-dpdk/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dpdk') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
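+      # Note: '+N' offsets below are relative to the start of the allocated
+      # network and '-N' to its end (fuel-devops convention), so
+      # 'dhcp: [+90, -10]' spans the 90th address through the 10th-from-last.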
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: True
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+           format: qcow2
+
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+         - name: mcp_ubuntu_1604_image           # Pre-configured image referenced by the kvm* and cid* nodes below
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # defaults to the same source as cloudimage1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH  # No source image is required:
+                                                             # it will be uploaded after the config drive is generated.
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 12
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
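+              # Two guest NUMA cells (6 vCPUs / 4096 MB each) are emulated here,
+              # presumably so NUMA-aware CPU pinning can be exercised in this DPDK lab.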
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
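+                # Note: ens5/ens6 (tenant) use the e1000 model rather than
+                # *interface_model, presumably so the guest can rebind these
+                # NICs to DPDK-capable drivers.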
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: e1000
+                - label: ens6
+                  l2_network_device: tenant
+                  interface_model: e1000
+                - label: ens7
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - tenant
+                ens7:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 12
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
new file mode 100644
index 0000000..375d734
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -0,0 +1,230 @@
+default_context:
+  auditd_enabled: 'False'
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+  cluster_domain: cookied-cicd-pike-dvr-sl.local
+  cluster_name: cookied-cicd-pike-dvr-sl
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.241
+  infra_kvm01_deploy_address: 10.167.5.91
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.242
+  infra_kvm02_deploy_address: 10.167.5.92
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.243
+  infra_kvm03_deploy_address: 10.167.5.93
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_range_end: 10.167.5.199
+  maas_deploy_range_start: 10.167.5.180
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy-fabric0
+  maas_hostname: cfg01
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: cicd
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_control_address: 10.167.4.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.4.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.4.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.4.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.100
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.167.4.101
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.167.4.102
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.167.4.103
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.4.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node02_address: 10.167.4.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node03_address: 10.167.4.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.6.8
+  openstack_message_queue_address: 10.167.4.100
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.167.4.101
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.167.4.102
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.167.4.103
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.4.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.4.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.4.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_port: '587'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: 'proposed'
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.4.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.4.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.4.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.4.63
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 10.167.4.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.4.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.4.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.4.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.4.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.4.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.4.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.4.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  stacklight_long_term_storage_type: prometheus
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
new file mode 100644
index 0000000..1791477
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
@@ -0,0 +1,259 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon01:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log01:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log02:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log03:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr01:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr02:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr03:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based computes. For compatibility only
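+    # The '<<count>>' placeholder is presumably expanded by the environment
+    # generator into cmp001..cmpNNN based on openstack_compute_count.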
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/overrides-policy.yml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/overrides-policy.yml
@@ -0,0 +1,43 @@
+parameters:
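+  # An assumption worth noting: entries with an empty value (e.g.
+  # 'compute:create:attach_network':) request removal of that rule from the
+  # service policy, while non-empty values override the default rule.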
+  nova:
+    controller:
+      policy:
+        context_is_admin: 'role:admin or role:administrator'
+        'compute:create': 'rule:admin_or_owner'
+        'compute:create:attach_network':
+  cinder:
+    controller:
+      policy:
+        'volume:delete': 'rule:admin_or_owner'
+        'volume:extend':
+  neutron:
+    server:
+      policy:
+        create_subnet: 'rule:admin_or_network_owner'
+        'get_network:queue_id': 'rule:admin_only'
+        'create_network:shared':
+  glance:
+    server:
+      policy:
+        publicize_image: "role:admin"
+        add_member:
+  keystone:
+    server:
+      policy:
+        admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+  heat:
+    server:
+      policy:
+        context_is_admin: 'role:admin and is_admin_project:True'
+        deny_stack_user: 'not role:heat_stack_user'
+        deny_everybody: '!'
+        'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+        'cloudformation:DescribeStackResources':
+  ceilometer:
+    server:
+      policy:
+        segregation: 'rule:context_is_admin'
+        'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
new file mode 100644
index 0000000..e9d9408
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
@@ -0,0 +1,14 @@
+{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..4c43578
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,103 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while the node is being prepared
+   - cloud-init-per once sudo touch /is_cloud_init_started
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - echo "******** MOUNT CONFIG DRIVE"
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   #- sudo ifdown ens3
+   #- sudo ip r d default || true  # remove existing default route to get it from dhcp
+   #- sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run user data script from config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+   - rm -f /etc/network/interfaces
+   #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+   #- cp /root/config-drive/user-data /root/user-data
+   #- sed -i '/^reboot$/d' /root/user-data
+   #- set -x; cd /root && /bin/bash -xe ./user-data
+   - |
+     set -x
+     cd /root/config-drive
+     if /bin/bash -xe ./user-data; then
+         touch /is_cloud_init_finished
+     else
+         set +x
+         echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
+     fi
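+   # The /is_cloud_init_started, /is_cloud_init_finished and /is_cloud_init_failed
+   # marker files are presumably polled by the test harness to track bootstrap status.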
+
+   # Enable root access (after reboot)
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   #- path: /etc/network/interfaces
+   - path: /root/interfaces
+     content: |
+          auto lo
+          iface lo inet loopback
+
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 60
+            ServerAliveCountMax 0
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
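Like every template in this change, the file above goes through two substitution passes: Jinja first (the {% for %} loop emits one ssh-rsa line per entry in config.underlay.ssh_keys), then a per-node pass that fills single-brace fields such as {gateway}. A minimal sketch of the first pass, with an illustrative stand-in for the config object:

# Sketch of the Jinja pass; the path and key material are illustrative.
import jinja2

template_text = open('underlay--user-data1604-swp.yaml').read()

class _Underlay:
    ssh_keys = [{'public': 'AAAAB3NzaC1yc2E...'}]  # placeholder key material

class _Config:
    underlay = _Underlay()

rendered = jinja2.Template(template_text).render(config=_Config())
print(rendered)  # one 'ssh-rsa ...' line per key; {gateway} is left for pass two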
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
new file mode 100644
index 0000000..a964d2b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -0,0 +1,887 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-cicd-pike-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-pike-dvr-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-pike-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dvr-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+# HOSTNAME_KVM and HOSTNAME_CID are the VIP hostnames referenced by the address pools below
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dvr-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
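The offsets above are relative addresses inside the subnet that fuel-devops allocates from each pool: '10.60.0.0/16:24' asks for a /24 carved out of 10.60.0.0/16, positive offsets count from the network address, and negative ones count back from the broadcast address (so dhcp: [+90, -10] spans .90 through .245 of a /24). A sketch of that arithmetic, assuming the first /24 is the one allocated:

# Sketch of the relative-offset arithmetic behind ip_reserved / ip_ranges.
# Semantics assumed from fuel-devops behavior; offsets are relative to the
# subnet actually allocated from the pool.
import ipaddress

subnet = ipaddress.ip_network('10.60.0.0/24')  # first /24 of the /16 pool

def resolve(offset, net=subnet):
    """'+241' -> 241st address from the network start; '-10' -> 10th from the end."""
    n = int(offset)
    base = net.network_address if n >= 0 else net.broadcast_address
    return base + n

print(resolve('+15'))   # 10.60.0.15  -> default_cfg01
print(resolve('+241'))  # 10.60.0.241 -> default_kvm01
print(resolve('-10'))   # 10.60.0.245 -> upper bound of the dhcp range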
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # no source image is required;
+                                                            # it will be uploaded after config drive generation
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
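The &interfaces and &network_config anchors defined on ctl01 above are reused by reference (*interfaces, *network_config) on every other two-NIC node below, so the admin/private wiring is written exactly once. This is plain YAML aliasing, which any loader resolves:

# Sketch: YAML anchors (&name) and aliases (*name) deduplicate node wiring.
import yaml

doc = """
ctl01:
  interfaces: &interfaces
    - {label: ens3, l2_network_device: admin}
    - {label: ens4, l2_network_device: private}
ctl02:
  interfaces: *interfaces
"""
data = yaml.safe_load(doc)
assert data['ctl02']['interfaces'] == data['ctl01']['interfaces']
print(data['ctl02']['interfaces'][0]['l2_network_device'])  # admin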
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
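Two interchangeable override styles recur in this file: {{ os_env('KVM_NODE_CPU', 1) }}, resolved when Jinja renders the template, and the custom YAML tag !os_env SLAVE_NODE_CPU, 2, resolved when the YAML is loaded. Both fall back to the literal default when the environment variable is unset. The Jinja-side helper amounts to the following sketch (tcp-qa ships its own implementation; this is just the contract):

# Sketch of the os_env('NAME', default) lookup the templates above assume.
import os

def os_env(var_name, default=None):
    """Return the environment variable if set, otherwise the default."""
    return os.environ.get(var_name, default)

# Override node sizing without editing the template:
#   $ export SLAVE_NODE_MEMORY=32768
print(os_env('SLAVE_NODE_MEMORY', 16384))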
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
new file mode 100644
index 0000000..d1e447c
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -0,0 +1,230 @@
+default_context:
+  auditd_enabled: 'False'
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+  cluster_domain: cookied-cicd-pike-ovs-sl.local
+  cluster_name: cookied-cicd-pike-ovs-sl
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.241
+  infra_kvm01_deploy_address: 10.167.5.91
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.242
+  infra_kvm02_deploy_address: 10.167.5.92
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.243
+  infra_kvm03_deploy_address: 10.167.5.93
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_range_end: 10.167.5.199
+  maas_deploy_range_start: 10.167.5.180
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy-fabric0
+  maas_hostname: cfg01
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: cicd
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_control_address: 10.167.4.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.4.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.4.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.4.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.100
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.167.4.101
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.167.4.102
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.167.4.103
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.4.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node02_address: 10.167.4.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node03_address: 10.167.4.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.6.8
+  openstack_message_queue_address: 10.167.4.100
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.167.4.101
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.167.4.102
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.167.4.103
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.4.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.4.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.4.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_port: '587'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: 'proposed'
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.4.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.4.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.4.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.4.63
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 10.167.4.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.4.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.4.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.4.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.4.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.4.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.4.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.4.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  stacklight_long_term_storage_type: influxdb
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
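Values such as public_host: ${_param:openstack_proxy_address} are reclass parameter references: they are kept verbatim in this context file and resolved against the compiled parameter space when the model is built. A toy resolver for the syntax (illustrative only; real reclass interpolation also handles nesting):

# Sketch: resolving ${_param:name} references the way reclass does at compile time.
import re

params = {
    'openstack_proxy_address': '10.167.4.80',
    'cluster_name': 'cookied-cicd-pike-ovs-sl',
}

def resolve(value, params=params):
    """Substitute every ${_param:key} with its value from the parameter map."""
    return re.sub(r'\$\{_param:([^}]+)\}',
                  lambda m: params[m.group(1)], value)

print(resolve('${_param:openstack_proxy_address}'))  # 10.167.4.80
print(resolve('${_param:cluster_name}.local'))       # cookied-cicd-pike-ovs-sl.local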
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
new file mode 100644
index 0000000..1791477
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
@@ -0,0 +1,257 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon01:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log01:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log02:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log03:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr01:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr02:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr03:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based compute nodes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
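The cmp<<count>> key above is a pattern rather than a hostname: the environment generator expands it into openstack_compute_count concrete entries (cmp001 and cmp002 here, matching the underlay) that share the rack01 reclass storage name and interface layout. A sketch of the assumed expansion:

# Sketch: expanding the generator-based 'cmp<<count>>' node pattern.
compute_count = 2  # from openstack_compute_count in the cookiecutter context

node_template = {
    'reclass_storage_name': 'openstack_compute_rack01',
    'roles': ['openstack_compute', 'linux_system_codename_xenial'],
}

nodes = {
    'cmp{:03d}'.format(i): dict(node_template)
    for i in range(1, compute_count + 1)
}
print(sorted(nodes))  # ['cmp001', 'cmp002']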
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
new file mode 100644
index 0000000..3c4d021
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
@@ -0,0 +1,14 @@
+{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
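The {hostname} fields above are the second substitution pass mentioned earlier: after Jinja rendering, these single-brace placeholders (and {gateway} in the user-data files) are filled per node when the config-drive image is generated, which plain Python string formatting is enough to model:

# Sketch: the assumed per-node substitution pass for {hostname}-style fields.
meta_data_template = (
    'instance-id: iid-local1\n'
    'hostname: {hostname}\n'
    'local-hostname: {hostname}\n'
)
print(meta_data_template.format(hostname='cfg01.cookied-cicd-pike-ovs-sl.local'))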
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..4c43578
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,101 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while the node is being prepared
+   - cloud-init-per once sudo touch /is_cloud_init_started
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - echo "******** MOUNT CONFIG DRIVE"
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Configure name resolution via resolvconf
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   #- sudo ifdown ens3
+   #- sudo ip r d default || true  # remove existing default route to get it from dhcp
+   #- sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Reset the network configuration, then run the user-data script from the config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+   - rm -f /etc/network/interfaces
+   #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+   #- cp /root/config-drive/user-data /root/user-data
+   #- sed -i '/^reboot$/d' /root/user-data
+   #- set -x; cd /root && /bin/bash -xe ./user-data
+   - |
+     set -x
+     cd /root/config-drive
+     if /bin/bash -xe ./user-data; then
+         touch /is_cloud_init_finished
+     else
+         set +x
+         echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
+     fi
+
+   # Enable root access (after reboot)
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   #- path: /etc/network/interfaces
+   - path: /root/interfaces
+     content: |
+          auto lo
+          iface lo inet loopback
+
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 60
+            ServerAliveCountMax 0
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
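
The bootcmd/runcmd sequence above leaves marker files (/is_cloud_init_started, /is_cloud_init_finished, /is_cloud_init_failed) that outside tooling can poll to tell a healthy bootstrap from a failed one. A sketch of such a poller over plain ssh (host and timeout values are illustrative, not part of this change):

    import subprocess
    import time

    def wait_cloud_init(host, timeout=1800, interval=15):
        # Poll the marker files written by the user-data above.
        check = ("test -f /is_cloud_init_failed && echo failed; "
                 "test -f /is_cloud_init_finished && echo ok")
        deadline = time.time() + timeout
        while time.time() < deadline:
            res = subprocess.run(
                ["ssh", "-o", "StrictHostKeyChecking=no", "root@" + host, check],
                capture_output=True, text=True)
            if "failed" in res.stdout:
                raise RuntimeError("bootstrap script failed on " + host)
            if "ok" in res.stdout:
                return
            time.sleep(interval)
        raise TimeoutError("cloud-init did not finish on " + host)
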
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure name resolution via resolvconf
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
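
Both user-data variants allocate a 16 GiB swap file and register it in /etc/fstab; /proc/swaps is the quickest place to confirm the activation took effect. A small check that could be dropped into a smoke test:

    def swapfile_active(path="/swapfile"):
        # True if the swap file appears in /proc/swaps (header line skipped).
        with open("/proc/swaps") as f:
            next(f)
            return any(line.split()[0] == path for line in f)

    assert swapfile_active(), "/swapfile was not activated by cloud-init"
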
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
new file mode 100644
index 0000000..f52a1a0
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
@@ -0,0 +1,886 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-cicd-pike-ovs-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-pike-ovs-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-pike-ovs-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-ovs-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+# kvm/cid without an index are the cluster VIP hostnames referenced by the
+# address pools below; defining them here follows the per-node pattern (assumed).
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-ovs-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
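
All ip_reserved and ip_ranges values above are offsets relative to the pool's network: non-negative offsets count from the network address, negative ones from the broadcast address (this reading of the fuel-devops pool semantics is an assumption; the '/16:24' net specs mean a /24 is carved out of the /16, so the concrete /24 below is illustrative). Resolving them by hand:

    import ipaddress

    def resolve(net, offset):
        # Resolve a devops-style relative offset inside a network.
        net = ipaddress.ip_network(net)
        base = net.network_address if offset >= 0 else net.broadcast_address
        return base + offset

    print(resolve("10.60.0.0/24", +15))   # 10.60.0.15  -> default cfg01
    print(resolve("10.60.0.0/24", +241))  # 10.60.0.241 -> default kvm01
    print(resolve("10.60.0.0/24", -10))   # 10.60.0.245 -> upper end of the dhcp range
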
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
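
Each node's system disk below is a qcow2 overlay on top of one of these group volumes, so per-node writes stay copy-on-write and the shared base image is never modified. The hand-rolled equivalent of what backing_store amounts to (illustrative; fuel-devops issues the corresponding libvirt/qemu calls itself):

    import subprocess

    # Create a 150G per-node overlay backed by the shared base image.
    subprocess.run(
        ["qemu-img", "create", "-f", "qcow2",
         "-b", "mcp_ubuntu_1604_image.qcow2", "-F", "qcow2",
         "ctl01-system.qcow2", "150G"],
        check=True)
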
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required;
+                                                            # it is uploaded after the config drive is generated
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
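
The 'config' cdrom above starts empty: a config drive with the generated model data is built and uploaded into it later. For a NoCloud-style seed the manual equivalent is packing user-data and meta-data into a small ISO (illustrative only; fuel-devops builds its own image):

    import subprocess

    # Pack the cloud-init seed files into an ISO labelled 'cidata'
    # (the NoCloud volume-id convention; illustrative only).
    subprocess.run(
        ["genisoimage", "-output", "config-drive.iso",
         "-volid", "cidata", "-joliet", "-rock",
         "user-data", "meta-data"],
        check=True)
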
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
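
The &interfaces and &network_config anchors defined on this first control node are reused below via *interfaces and *network_config, so the two-NIC wiring is written only once. This is plain YAML aliasing, e.g. with PyYAML:

    import yaml

    data = yaml.safe_load("""
    ctl01:
      interfaces: &interfaces
        - {label: ens3, l2_network_device: admin}
        - {label: ens4, l2_network_device: private}
    ctl02:
      interfaces: *interfaces
    """)
    # The alias resolves to the same content as the anchored node.
    assert data["ctl02"]["interfaces"] == data["ctl01"]["interfaces"]
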
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
index 2493c63..0ade6cf 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
@@ -146,6 +146,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
   stacklight_log_hostname: mon
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/common-services.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-mitaka-dvr/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
index 0e42155..98016f7 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
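
The only change in this hunk is appending "auditd" to FORMULA_SERVICES. The value is a single string of quoted names, so the consuming side has to split it back into salt-formula-<name> packages; how MACRO_CONFIGURE_RECLASS actually does that lives in shared-salt.yaml, but the splitting step amounts to:

    import shlex

    FORMULA_SERVICES = '"linux" "reclass" "salt" "fluentd" "auditd"'  # abridged

    # shlex strips the embedded quotes, yielding one name per formula.
    packages = ["salt-formula-" + name for name in shlex.split(FORMULA_SERVICES)]
    print(packages)  # ['salt-formula-linux', ..., 'salt-formula-auditd']
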
 
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/common-services.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-mitaka-ovs/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
index f4d7c8c..be11365 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dop-sl2/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml
rename to tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
new file mode 100644
index 0000000..1cedd52
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
@@ -0,0 +1,60 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+{% set LAB_CONFIG_NAME = 'cookied-bm-dpdk-pipeline' %}
+{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-dpdk-pipeline') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + " " + REPOSITORY_SUITE + " main") %}
+{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") #}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for rack01 compute generator"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    # Remove rack01 key
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # Add openstack_compute_node definition from system
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
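
In reclass terms, the workaround above deletes the rack01 generator entry from parameters.reclass.storage.node and merges the openstack_compute_multi class into the classes list. The same edit expressed on the parsed YAML (a sketch; reclass-tools performs it in place on the model):

    import yaml

    with open("infra/config.yml") as f:  # path abridged for the sketch
        cfg = yaml.safe_load(f)

    # del-key parameters.reclass.storage.node.openstack_compute_rack01
    cfg["parameters"]["reclass"]["storage"]["node"].pop(
        "openstack_compute_rack01", None)

    # add-key 'classes' ... --merge: append to the existing list
    cfg.setdefault("classes", []).append(
        "system.reclass.storage.system.openstack_compute_multi")
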
+
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Temporary workaround to set correct bridge names according to the environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+    salt '*' saltutil.refresh_pillar;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index d27b6ae..dc4d2bc 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -2,73 +2,38 @@
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
 {% set LAB_CONFIG_NAME = 'cookied-bm-mcp-dvr-vxlan' %}
+{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
 # Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-
+{% set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
+{% set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
+{% set CONTROL_VLAN = os_env('CONTROL_VLAN', '2403') %}
+{% set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-- description: Re-install all the fromulas
-  cmd: |
-    set -e;
-    apt-get install -y salt-formula-*
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Sync formulas to service
-  cmd: |
-    set -e;
-    RECLASS_ROOT=${RECLASS_ROOT:-/srv/salt/reclass/};
-    FORMULAS_PATH=${FORMULAS_PATH:-/usr/share/salt-formulas};
-    [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service;
-    for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
-        #Since some salt formula names contain "-" and in symlinks they should contain "_" adding replacement;
-        formula_service=${formula_service//-/$'_'};
-        if [ ! -L "${RECLASS_ROOT}/classes/service/${formula_service}" ]; then
-            ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} ${RECLASS_ROOT}/classes/service/${formula_service};
-        fi;
-    done
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
 
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-- description: Temporary WR for cinder backend defined by default in reclass.system
-  cmd: |
-    sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: "Workaround for rack01 compute generator"
   cmd: |
     set -e;
     . /root/venv-reclass-tools/bin/activate;
     # Remove rack01 key
     reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
     # Add openstack_compute_node definition from system
     reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
-
-    # Set ipaddresses for our nodes
-    reclass-tools add-key parameters._param.openstack_compute_node01_control_address 10.167.4.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_control_address 10.167.4.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address 10.167.6.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address 10.167.6.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+
 - description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
     sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
@@ -77,13 +42,15 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
-#- description: Refresh pillars and install dependencies for salt-master
-#  cmd: |
-#    salt '*' saltutil.refresh_pillar; sleep 5;
-#    salt '*' state.sls salt.master;
-#    salt '*' saltutil.refresh_pillar; sleep 5;
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
+- description: Temporary workaround to set correct bridge names according to the environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+    salt '*' saltutil.refresh_pillar;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
 
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index 125b6e1..95b6442 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -17,37 +17,14 @@
 {# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") #}
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-- description: Re-install all the fromulas
-  cmd: |
-    set -e;
-    apt-get install -y salt-formula-*
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-- description: Sync formulas to service
-  cmd: |
-    set -e;
-    RECLASS_ROOT=${RECLASS_ROOT:-/srv/salt/reclass/};
-    FORMULAS_PATH=${FORMULAS_PATH:-/usr/share/salt-formulas};
-    [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service;
-    for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
-        #Since some salt formula names contain "-" and in symlinks they should contain "_" adding replacement;
-        formula_service=${formula_service//-/$'_'};
-        if [ ! -L "${RECLASS_ROOT}/classes/service/${formula_service}" ]; then
-            ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} ${RECLASS_ROOT}/classes/service/${formula_service};
-        fi;
-    done
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
 - description: "Workaround for rack01 compute generator"
   cmd: |
     set -e;
@@ -69,5 +46,15 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+- description: Temporary workaround for correct bridge names according to the environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+    salt '*' saltutil.refresh_pillar;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
 
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
index 952e798..0e8095a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -12,29 +12,12 @@
 
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
 
-- description: Sync formulas to service
-  cmd: |
-    set -e;
-    RECLASS_ROOT=${RECLASS_ROOT:-/srv/salt/reclass/};
-    FORMULAS_PATH=${FORMULAS_PATH:-/usr/share/salt-formulas};
-    [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service;
-    for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
-        #Since some salt formula names contain "-" and in symlinks they should contain "_" adding replacement;
-        formula_service=${formula_service//-/$'_'};
-        if [ ! -L "${RECLASS_ROOT}/classes/service/${formula_service}" ]; then
-            ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} ${RECLASS_ROOT}/classes/service/${formula_service};
-        fi;
-    done
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
 
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
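The shell loop removed in this hunk (and in the identical hunks below) created `classes/service/<formula>` symlinks, converting dashes in formula names to underscores; that logic is now centralized in `SHARED.MACRO_INSTALL_FORMULAS`, which also runs before model generation instead of after. A Python sketch mirroring the old loop, for reference:

```python
# Sketch of the symlink sync that the removed shell loop performed (now
# handled inside SHARED.MACRO_INSTALL_FORMULAS). Mirrors the old step,
# including the dash -> underscore rename of the service class names.
import os

RECLASS_ROOT = os.environ.get("RECLASS_ROOT", "/srv/salt/reclass/")
FORMULAS_PATH = os.environ.get("FORMULAS_PATH", "/usr/share/salt-formulas")

def sync_formula_services():
    service_dir = os.path.join(RECLASS_ROOT, "classes", "service")
    os.makedirs(service_dir, exist_ok=True)
    source_dir = os.path.join(FORMULAS_PATH, "reclass", "service")
    for name in os.listdir(source_dir):
        # Some formula names contain '-', but the symlinks should use '_'
        linked = name.replace("-", "_")
        link = os.path.join(service_dir, linked)
        if not os.path.islink(link):
            os.symlink(os.path.join(source_dir, linked), link)
```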
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
index bbf295c..776a516 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -12,29 +12,12 @@
 
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
 
-- description: Sync formulas to service
-  cmd: |
-    set -e;
-    RECLASS_ROOT=${RECLASS_ROOT:-/srv/salt/reclass/};
-    FORMULAS_PATH=${FORMULAS_PATH:-/usr/share/salt-formulas};
-    [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service;
-    for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
-        #Since some salt formula names contain "-" and in symlinks they should contain "_" adding replacement;
-        formula_service=${formula_service//-/$'_'};
-        if [ ! -L "${RECLASS_ROOT}/classes/service/${formula_service}" ]; then
-            ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} ${RECLASS_ROOT}/classes/service/${formula_service};
-        fi;
-    done
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
 
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
-
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
new file mode 100644
index 0000000..6e43bfd
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
@@ -0,0 +1,57 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dpdk' %}
+# Name of the context file (without the .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-ovs-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Set a wider CPU mask for DPDK
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # Set virtual disks for compute
+    sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "Workaround: set bridge_mappings for the DPDK compute nodes"
+  cmd: |
+    cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
+    parameters:
+      neutron:
+        compute:
+          bridge_mappings:
+            physnet2: br-prv
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
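The new DPDK model sets `compute_ovs_dpdk_lcore_mask` to `0xF`, a hex bitmask selecting CPU cores 0-3 for OVS-DPDK lcores. A tiny sketch decoding such a mask (generic bitmask arithmetic, not tcp-qa code):

```python
# Decode an OVS-DPDK lcore mask into CPU core IDs; '0xF' (set above via
# compute_ovs_dpdk_lcore_mask) selects cores 0-3.
def lcore_mask_to_cores(mask):
    value = int(mask, 16)
    return [bit for bit in range(value.bit_length()) if value >> bit & 1]

assert lcore_mask_to_cores("0xF") == [0, 1, 2, 3]
```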
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
new file mode 100644
index 0000000..edfce51
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
@@ -0,0 +1,59 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dvr-sl' %}
+# Name of the context file (without the .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-dvr-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # Workaround of missing reclass.system for dns role
+    # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
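Since the database and message-queue roles are combined onto the controllers in this model, the workaround above removes the three dedicated node definitions per role. A sketch that reproduces the same `reclass-tools del-key` command lines; 'CLUSTER_NAME' stands in for the real value of SHARED.CLUSTER_NAME:

```python
# Sketch: generate the reclass-tools del-key calls used above for roles
# combined onto the controllers; 'CLUSTER_NAME' is a placeholder.
CONFIG = "/srv/salt/reclass/classes/cluster/{cluster}/infra/config.yml"

def del_key_commands(roles, nodes=3, cluster="CLUSTER_NAME"):
    path = CONFIG.format(cluster=cluster)
    for role in roles:
        for n in range(1, nodes + 1):
            yield ("reclass-tools del-key parameters.reclass.storage.node."
                   "{role}_node{n:02d} {path}".format(role=role, n=n, path=path))

for cmd in del_key_commands(["openstack_database", "openstack_message_queue"]):
    print(cmd)
```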
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
new file mode 100644
index 0000000..182cbf8
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
@@ -0,0 +1,48 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-ovs-sl' %}
+# Name of the context file (without the .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-pike-ovs-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Bind9 services are placed on the first two ctl nodes
+    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/k8s-ha-calico/common-services.yaml b/tcp_tests/templates/k8s-ha-calico/core.yaml
similarity index 100%
rename from tcp_tests/templates/k8s-ha-calico/common-services.yaml
rename to tcp_tests/templates/k8s-ha-calico/core.yaml
diff --git a/tcp_tests/templates/k8s-ha-contrail/common-services.yaml b/tcp_tests/templates/k8s-ha-contrail/core.yaml
similarity index 100%
rename from tcp_tests/templates/k8s-ha-contrail/common-services.yaml
rename to tcp_tests/templates/k8s-ha-contrail/core.yaml
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/core.yaml
similarity index 100%
rename from tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
rename to tcp_tests/templates/mcp-ocata-local-repo-dvr/core.yaml
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml
rename to tcp_tests/templates/physical-mcp-ocata-offline-ovs/core.yaml
diff --git a/tcp_tests/templates/shared-backup-restore.yaml b/tcp_tests/templates/shared-backup-restore.yaml
index 01240d9..02fdeec 100644
--- a/tcp_tests/templates/shared-backup-restore.yaml
+++ b/tcp_tests/templates/shared-backup-restore.yaml
@@ -140,7 +140,7 @@
     systemctl restart nginx;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
-  skip_fail: false
+  skip_fail: true
 
 {%- endmacro %}
 
diff --git a/tcp_tests/templates/shared-common-services.yaml b/tcp_tests/templates/shared-core.yaml
similarity index 100%
rename from tcp_tests/templates/shared-common-services.yaml
rename to tcp_tests/templates/shared-core.yaml
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 9a2bcd9..36d68bb 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -958,7 +958,7 @@
 - description: Update minion information
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update && sleep 15
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update && sleep 60
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -1356,4 +1356,4 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-{%- endmacro %}
\ No newline at end of file
+{%- endmacro %}
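The pause after `mine.update` grows from 15 to 60 seconds to give minions more time to populate the mine. A hedged alternative would be to poll the mine until data appears instead of sleeping a fixed interval; the template keeps the plain sleep, so the sketch below only illustrates that pattern, with an assumed check command:

```python
# Hedged sketch: poll for salt mine data instead of a fixed sleep after
# 'mine.update'. The template itself uses a plain 'sleep 60'; this is only
# an illustration of the waiting pattern, with an assumed mine.get check.
import subprocess
import time

def wait_for_mine(minion="cfg01*", function="grains.items", timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.run(
            ["salt", minion, "mine.get", "*", function, "--out=txt"],
            capture_output=True, text=True).stdout
        if out.strip():
            return True
        time.sleep(5)
    return False
```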
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-ocata-ceph-offline/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 83f45ea..505282d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -146,6 +146,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
   stacklight_log_hostname: mon
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
index 2334b43..89f141d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index 08db21a..eedb7d9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -146,6 +146,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
   stacklight_log_hostname: mon
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
index dd196c0..aa859b0 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index 98e2784..653b461 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -331,13 +331,6 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
-- description: Create manila type
-  cmd: |
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Create CIFS and NFS share and check it status
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
index c4373ce..62cb069 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -14,7 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "barbican" "dogtag" "runtest" "artifactory" "logrotate" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "barbican" "dogtag" "runtest" "artifactory" "logrotate" "auditd" "gnocchi" "manila"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
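The FORMULA_SERVICES argument gains "gnocchi" and "manila" here and in the other Pike templates below. The value is a single quoted string of formula names; since formulas ship as `salt-formula-<name>` apt packages (see the removed re-install step earlier), a sketch of how such a list maps to package names:

```python
# Sketch: map the FORMULA_SERVICES string passed to MACRO_CONFIGURE_RECLASS
# onto apt package names (formulas ship as 'salt-formula-<name>' packages).
# The list below is abbreviated for the example.
import shlex

FORMULA_SERVICES = '"linux" "reclass" "salt" "auditd" "gnocchi" "manila"'

packages = ["salt-formula-" + name for name in shlex.split(FORMULA_SERVICES)]
print(" ".join(packages))
# salt-formula-linux salt-formula-reclass salt-formula-salt ...
```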
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
index 60aa2c8..d2a1bcc 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml
@@ -14,17 +14,17 @@
 
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr-ssl-barbican') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01') %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02') %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01') %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02') %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01') %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01') %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02') %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01') %}
 
 template:
   devops_settings:
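The barbican underlay now defaults hostnames to short names ('cfg01') instead of FQDNs derived from DOMAIN_NAME. For reference, the `os_env` Jinja helper used throughout these templates behaves roughly like an environment lookup with a default (a sketch, not the actual helper):

```python
# Rough sketch of the os_env() Jinja helper used in the underlay templates:
# return the environment variable if it is set, else the given default.
import os

def os_env(name, default=None):
    return os.environ.get(name, default)

# After this change the default is a short name with no domain suffix:
HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01')
print(HOSTNAME_CFG01)
```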
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index 7417c09..bf1ab55 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -316,13 +316,6 @@
   retry: {count: 3, delay: 15}
   skip_fail: false
 
-- description: Create manila type
-  cmd: |
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Create CIFS and NFS share and check it status
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
index 9e8ca34..f9c719a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
@@ -14,7 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "logrotate" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "logrotate" "auditd" "gnocchi" "manila"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index abed769..4d64fe4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -310,13 +310,6 @@
   retry: {count: 3, delay: 15}
   skip_fail: false
 
-- description: Create manila type
-  cmd: |
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Create CIFS and NFS share and check it status
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index cc44eb5..788e267 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "runtest" "neutron" "logrotate" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "runtest" "neutron" "logrotate" "auditd" "gnocchi" "manila"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index e858bcd..0b9e418 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -329,13 +329,6 @@
   retry: {count: 3, delay: 15}
   skip_fail: false
 
-- description: Create manila type
-  cmd: |
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Create CIFS and NFS share and check it status
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 0bef03e..75a8d21 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "neutron" "logrotate" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "neutron" "logrotate" "auditd" "gnocchi" "manila"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml b/tcp_tests/templates/virtual-mcp-sl-os/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-sl-os/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp-trusty/common-services.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-trusty/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-trusty/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp11-dvr/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-dvr/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-k8s-calico/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-k8s-contrail/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-ovs-dpdk/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-ovs.new/core.yaml
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
rename to tcp_tests/templates/virtual-mcp11-ovs/core.yaml
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-offline-pike-ovs-dpdk/common-services.yaml
rename to tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
index 0bf2b70..cb188b8 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
@@ -6,10 +6,13 @@
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker/ ' + REPOSITORY_SUITE + ' stable') %}
 # Install OpenStack control services
-
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
 {% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+
 
 - description: Install glance on all controllers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -186,7 +189,6 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
-
   # Upload cirros image
 # Configure cinder-volume salt-call
 - description: Set disks 01
@@ -366,5 +368,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
 {{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
 {{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index 3feec84..74679f9 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -55,7 +55,7 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -71,3 +71,16 @@
 {{ VSWITCH.MACRO_CHECK_BGPVPN_ENABLED_BY_DEFAULT() }}
 
 {{ VSWITCH.MACRO_ENABLE_L2GW(SHARED.CLUSTER_NAME, VSWITCH_IP) }}
+
+- description: Enable hugepages on cmp nodes
+  cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Workaround to bring OVS interfaces UP on cmp nodes without a reboot
+  cmd: |
+    salt 'cmp*' cmd.run "ifup br-mesh";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
\ No newline at end of file
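The new hugepages step writes 2048 to /proc/sys/vm/nr_hugepages on the cmp nodes. A small verification sketch, assuming it runs locally on a compute node after the step:

```python
# Sketch: verify the hugepages reservation made by the step above.
# Assumes it runs on the compute node itself after the salt command.
def hugepages_total(meminfo="/proc/meminfo"):
    with open(meminfo) as f:
        for line in f:
            if line.startswith("HugePages_Total:"):
                return int(line.split()[1])
    return 0

if hugepages_total() < 2048:
    print("WARNING: hugepages were not reserved as expected")
```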
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
index dabe708..178ef29 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
@@ -376,8 +376,13 @@
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
+              vcpu: !os_env SLAVE_NODE_CPU, 12
               memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -427,8 +432,13 @@
           - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
+              vcpu: !os_env SLAVE_NODE_CPU, 12
               memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
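Both DPDK compute nodes grow from 3 to 12 vCPUs, pinned into two NUMA cells of 6 CPUs and 4096 MB each. A sketch that builds this layout programmatically, matching the values above:

```python
# Sketch: build the two-cell NUMA layout used for the DPDK compute nodes
# (12 vCPUs split into two cells of 6 CPUs and 4096 MB each).
def numa_layout(vcpus=12, cells=2, cell_memory=4096):
    per_cell = vcpus // cells
    return [{"cpus": ",".join(str(c) for c in range(i * per_cell,
                                                    (i + 1) * per_cell)),
             "memory": cell_memory}
            for i in range(cells)]

print(numa_layout())
# [{'cpus': '0,1,2,3,4,5', 'memory': 4096},
#  {'cpus': '6,7,8,9,10,11', 'memory': 4096}]
```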
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
rename to tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 1e5d62e..6aadd32 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -3,10 +3,14 @@
 {% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
 {% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
 {% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker/ ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs') %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
 # Install OpenStack control services
 
 - description: Install glance on all controllers
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
index a1c39d9..bcabac5 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
@@ -59,7 +59,7 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-offline-ssl/common-services.yaml b/tcp_tests/templates/virtual-offline-ssl/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-offline-ssl/common-services.yaml
rename to tcp_tests/templates/virtual-offline-ssl/core.yaml
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
index d3586a1..31cfe01 100644
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
@@ -10,10 +10,12 @@
 {% from 'virtual-offline-ssl/underlay.yaml' import DOMAIN_NAME with context %}
 
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker/ ' + REPOSITORY_SUITE + ' stable') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-ssl') %}
 {% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+
 
 # Install OpenStack control services
 
@@ -272,13 +274,6 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
-- description: Create manila type
-  cmd: |
-    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Create CIFS and NFS share and check it status
   cmd: |
     salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
@@ -490,6 +485,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+
 - description: Enable local docker repo
   cmd: |
     set -e;
@@ -524,8 +520,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
-
-{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-ssl/salt.yaml b/tcp_tests/templates/virtual-offline-ssl/salt.yaml
index 3851882..f2d794a 100644
--- a/tcp_tests/templates/virtual-offline-ssl/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/salt.yaml
@@ -58,7 +58,7 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "dogtag" "runtest"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "dogtag" "runtest" "manila"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -67,3 +67,16 @@
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Enable hugepages on cmp nodes
+  cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Workaround to bring OVS interfaces UP on cmp nodes without a reboot
+  cmd: |
+    salt 'cmp*' cmd.run "ifup br-mesh";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
index 18c2bf1..6277d6a 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay.yaml
@@ -484,8 +484,13 @@
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              vcpu: !os_env SLAVE_NODE_CPU, 12
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -535,8 +540,13 @@
           - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              vcpu: !os_env SLAVE_NODE_CPU, 12
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
rename to tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
index 2ed45be..6fea795 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
@@ -5,6 +5,11 @@
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-pike-ovs-dpdk') %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+
 # Install OpenStack control services
 
 - description: Install glance on all controllers
@@ -275,3 +280,8 @@
   node_name: {{ HOSTNAME_CTL03 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index 3fac01a..e972107 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/tests/component/conftest.py b/tcp_tests/tests/component/conftest.py
new file mode 100644
index 0000000..f33b1b4
--- /dev/null
+++ b/tcp_tests/tests/component/conftest.py
@@ -0,0 +1,61 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests.fixtures.common_fixtures import *  # noqa
+from tcp_tests.fixtures.ceph_fixtures import *  # noqa
+from tcp_tests.fixtures.config_fixtures import *  # noqa
+from tcp_tests.fixtures.underlay_fixtures import *  # noqa
+from tcp_tests.fixtures.rally_fixtures import *  # noqa
+from tcp_tests.fixtures.salt_fixtures import *  # noqa
+from tcp_tests.fixtures.core_fixtures import *  # noqa
+from tcp_tests.fixtures.openstack_fixtures import *  # noqa
+from tcp_tests.fixtures.opencontrail_fixtures import *  # noqa
+from tcp_tests.fixtures.oss_fixtures import *  # noqa
+from tcp_tests.fixtures.decapod_fixtures import *  # noqa
+from tcp_tests.fixtures.stacklight_fixtures import *  # noqa
+from tcp_tests.fixtures.k8s_fixtures import *  # noqa
+from tcp_tests.fixtures.drivetrain_fixtures import *  # noqa
+from tcp_tests.fixtures.runtest_fixtures import *  # noqa
+
+
+__all__ = sorted([  # sort for documentation
+    # common_fixtures
+    'show_step',
+    'func_name',
+    # config_fixtures
+    'config',
+    # rally_fixtures
+    'rally',
+    # salt_fixtures
+    'salt_actions',
+    # core_fixtures
+    'core_actions',
+    # openstack_fixtures
+    'openstack_actions',
+    # oss_fixtures
+    'oss_actions',
+    # drivetrain_fixtures
+    'drivetrain_actions',
+    # decapod_fixtures
+    'decapod_actions',
+    # component fixtures
+    'opencontrail',
+    # stacklight_fixtures
+    'sl_actions',
+    'ceph_action',
+    # k8s fixtures
+    'k8s_actions',
+    # tempest
+    'tempest_actions'
+])
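The new component conftest.py aggregates fixtures with star imports and pins the re-exported names in a sorted `__all__`. A standalone demonstration of the mechanism it relies on (module and names are invented for the example):

```python
# Standalone demonstration of the star-import/__all__ mechanism the new
# conftest.py relies on: only names listed in __all__ are re-exported.
import os
import sys
import tempfile

src = ("def show_step(): pass\n"
       "def _private(): pass\n"
       "__all__ = ['show_step']\n")
d = tempfile.mkdtemp()
with open(os.path.join(d, "fixtures_demo.py"), "w") as f:
    f.write(src)
sys.path.insert(0, d)

ns = {}
exec("from fixtures_demo import *", ns)  # honors __all__
assert "show_step" in ns and "_private" not in ns
```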
diff --git a/tcp_tests/tests/component/readme_first.txt b/tcp_tests/tests/component/readme_first.txt
new file mode 100644
index 0000000..8ac5988
--- /dev/null
+++ b/tcp_tests/tests/component/readme_first.txt
@@ -0,0 +1,10 @@
+Component tests in this directory are used as sanity checks of
+various components during deployment.
+The results of these checks are reported as the results of the
+corresponding deployment step in the TestRail report.
+
+DO NOT use deployment fixtures that depend on the 'hardware'
+fixture, because the 'hardware' fixture won't be initialized for these checks.
+
+To access the environment, please use '*_actions' fixtures that
+provide managers to work with the components directly.
\ No newline at end of file
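A minimal component check following these rules, in the same shape as the test modules added below; the marker name is hypothetical, and only fixtures that don't need 'hardware' are used:

```python
# Minimal component check in the style of the modules below: no fixtures
# that depend on 'hardware', only config/underlay access. The marker name
# 'check_example' is hypothetical; this runs only inside the tcp-qa tree.
import pytest

from tcp_tests import logger

LOG = logger.logger


@pytest.mark.check_example
def test_check_example(config, underlay_actions):
    LOG.info("*************** DONE **************")
```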
diff --git a/tcp_tests/tests/component/test_check_calico.py b/tcp_tests/tests/component/test_check_calico.py
new file mode 100644
index 0000000..6cae1e1
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_calico.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_calico
+def test_check_calico(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_ceph.py b/tcp_tests/tests/component/test_check_ceph.py
new file mode 100644
index 0000000..7f0cc8d
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_ceph.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_ceph
+def test_check_ceph(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_cicd.py b/tcp_tests/tests/component/test_check_cicd.py
new file mode 100644
index 0000000..3c5382e
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_cicd.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_cicd
+def test_check_cicd(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_contrail.py b/tcp_tests/tests/component/test_check_contrail.py
new file mode 100644
index 0000000..1eb80f2
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_contrail.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_contrail
+def test_check_contrail(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_core.py b/tcp_tests/tests/component/test_check_core.py
new file mode 100644
index 0000000..940b54c
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_core.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_core
+def test_check_core(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_finalize.py b/tcp_tests/tests/component/test_check_finalize.py
new file mode 100644
index 0000000..e979c26
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_finalize.py
@@ -0,0 +1,31 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_finalize
+def test_check_finalize(config, underlay_actions):
+    """Not a component test, just a placeholder.
+
+    'finalize' stands for state.highstate on all nodes
+    at the end of the deployment.
+    Here it can be checked that all the nodes completed this state
+    without errors (i.e. that no errors were skipped by the pipelines).
+    """
+    LOG.info("*************** DONE **************")
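
Since 'finalize' corresponds to state.highstate on all nodes, the placeholder above could later be extended to verify the highstate result explicitly. The sketch below is hypothetical: the salt command itself is standard, but the exact check_call signature and node-name resolution are assumptions based on how the underlay manager is used elsewhere in tcp-qa:

@pytest.mark.check_finalize
def test_check_finalize_highstate(config, underlay_actions):
    # Dry-run the highstate from the salt master and log the summary;
    # failed states would show up as 'Result: False' entries.
    result = underlay_actions.check_call(
        "salt '*' state.highstate test=True --state-output=terse",
        node_name='cfg01',  # assumed short node name, as used by other managers
        raise_on_err=False)
    LOG.info(''.join(result['stdout']))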
diff --git a/tcp_tests/tests/component/test_check_k8s.py b/tcp_tests/tests/component/test_check_k8s.py
new file mode 100644
index 0000000..10e3040
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_k8s.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_k8s
+def test_check_k8s(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_kvm.py b/tcp_tests/tests/component/test_check_kvm.py
new file mode 100644
index 0000000..16db232
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_kvm.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_kvm
+def test_check_kvm(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_openstack.py b/tcp_tests/tests/component/test_check_openstack.py
new file mode 100644
index 0000000..14eb2a9
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_openstack.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_openstack
+def test_check_openstack(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_oss.py b/tcp_tests/tests/component/test_check_oss.py
new file mode 100644
index 0000000..47847bf
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_oss.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_oss
+def test_check_oss(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_ovs.py b/tcp_tests/tests/component/test_check_ovs.py
new file mode 100644
index 0000000..74445fa
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_ovs.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_ovs
+def test_check_ovs(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/component/test_check_stacklight.py b/tcp_tests/tests/component/test_check_stacklight.py
new file mode 100644
index 0000000..14b4169
--- /dev/null
+++ b/tcp_tests/tests/component/test_check_stacklight.py
@@ -0,0 +1,24 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.check_stacklight
+def test_check_stacklight(config, underlay_actions):
+    LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/environment/conftest.py b/tcp_tests/tests/environment/conftest.py
index 47ca68a..a1db84c 100644
--- a/tcp_tests/tests/environment/conftest.py
+++ b/tcp_tests/tests/environment/conftest.py
@@ -17,7 +17,7 @@
 from tcp_tests.fixtures.underlay_fixtures import *  # noqa
 from tcp_tests.fixtures.rally_fixtures import *  # noqa
 from tcp_tests.fixtures.salt_fixtures import *  # noqa
-from tcp_tests.fixtures.common_services_fixtures import *  # noqa
+from tcp_tests.fixtures.core_fixtures import *  # noqa
 from tcp_tests.fixtures.openstack_fixtures import *  # noqa
 from tcp_tests.fixtures.opencontrail_fixtures import *  # noqa
 
@@ -36,9 +36,9 @@
     # salt_fixtures
     'salt_actions',
     'salt_deployed',
-    # common_services_fixtures
-    'common_services_actions',
-    'common_services_deployed',
+    # core_fixtures
+    'core_actions',
+    'core_deployed',
     # openstack_fixtures
     'openstack_actions',
     'openstack_deployed',
diff --git a/tcp_tests/tests/environment/test_bootstrap_salt.py b/tcp_tests/tests/environment/test_bootstrap_salt.py
new file mode 100644
index 0000000..f852ffa
--- /dev/null
+++ b/tcp_tests/tests/environment/test_bootstrap_salt.py
@@ -0,0 +1,31 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestBootstrapCore(object):
+    """Test class for deploy local dns_vm"""
+
+    def test_create_environment(self, config, hardware):
+        """Create environment hardware in disabled state"""
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.fail_snapshot
+    def test_bootstrap_salt(self, config, underlay, salt_deployed):
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/environment/test_local_dns.py b/tcp_tests/tests/environment/test_local_dns.py
index dfd29a1..fcc0978 100644
--- a/tcp_tests/tests/environment/test_local_dns.py
+++ b/tcp_tests/tests/environment/test_local_dns.py
@@ -25,7 +25,3 @@
     @pytest.mark.fail_snapshot
     def test_install_local_dns(self, config, underlay):
         LOG.info("*************** DONE **************")
-
-    @pytest.mark.fail_snapshot
-    def test_install_local_salt(self, config, underlay, salt_deployed):
-        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 61dd8e2..0bd4232 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -18,7 +18,7 @@
 from tcp_tests.fixtures.underlay_fixtures import *  # noqa
 from tcp_tests.fixtures.rally_fixtures import *  # noqa
 from tcp_tests.fixtures.salt_fixtures import *  # noqa
-from tcp_tests.fixtures.common_services_fixtures import *  # noqa
+from tcp_tests.fixtures.core_fixtures import *  # noqa
 from tcp_tests.fixtures.openstack_fixtures import *  # noqa
 from tcp_tests.fixtures.opencontrail_fixtures import *  # noqa
 from tcp_tests.fixtures.oss_fixtures import *  # noqa
@@ -46,9 +46,9 @@
     # salt_fixtures
     'salt_actions',
     'salt_deployed',
-    # common_services_fixtures
-    'common_services_actions',
-    'common_services_deployed',
+    # core_fixtures
+    'core_actions',
+    'core_deployed',
     # openstack_fixtures
     'openstack_actions',
     'openstack_deployed',
@@ -66,7 +66,7 @@
     'opencontrail',
     # stacklight_fixtures
     'sl_actions',
-    'sl_deployed',
+    'stacklight_deployed',
     'sl_os_deployed',
     'ceph_deployed',
     'ceph_action',
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
index 2f1bc25..934e28b 100644
--- a/tcp_tests/tests/system/test_failover_ceph.py
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -110,7 +110,7 @@
     @pytest.mark.fail_snapshot
     def test_restart_cmn_node(self, func_name, underlay, config,
                               openstack_deployed, ceph_deployed,
-                              common_services_actions,
+                              core_actions,
                               salt_actions, openstack_actions,
                               rally, show_step, hardware):
         """Test restart ceph cmn node
@@ -177,7 +177,7 @@
     @pytest.mark.fail_snapshot
     def test_restart_rgw_node(self, func_name, underlay, config,
                               openstack_deployed, ceph_deployed,
-                              common_services_actions, hardware,
+                              core_actions, hardware,
                               salt_actions, openstack_actions,
                               rally, show_step):
         """Test restart ceph rgw node
diff --git a/tcp_tests/tests/system/test_failover_k8s.py b/tcp_tests/tests/system/test_failover_k8s.py
index 1ad43b9..a334a42 100644
--- a/tcp_tests/tests/system/test_failover_k8s.py
+++ b/tcp_tests/tests/system/test_failover_k8s.py
@@ -24,7 +24,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_k8s_master_vip_migration(self, show_step, k8s_deployed, underlay,
-                                      k8s_actions, common_services_actions,
+                                      k8s_actions, core_actions,
                                       config, hardware):
         """Test restart and shutdown master with VIP
 
@@ -40,12 +40,12 @@
         """
         show_step(1)
         show_step(2)
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
 
         show_step(3)
         vip = k8s_actions.get_keepalived_vip()
         LOG.info("VIP ip address: {}".format(vip))
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        minion_vip = core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
 
         show_step(4)
@@ -54,16 +54,16 @@
         show_step(5)
         try:
             new_minion_vip =\
-                common_services_actions.get_keepalived_vip_minion_id(vip)
+                core_actions.get_keepalived_vip_minion_id(vip)
         except Exception:
                 time.sleep(15)
                 new_minion_vip = \
-                    common_services_actions.get_keepalived_vip_minion_id(vip)
+                    core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} migrated to {1}".format(vip, new_minion_vip))
         assert new_minion_vip != minion_vip
 
         show_step(6)
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
 
         show_step(7)
         curl_output = ''.join(underlay.check_call(
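
The try/except above gives keepalived a single fixed 15-second retry to finish moving the VIP. A more robust variant (a hypothetical refactor, not part of this change) would poll with the same helpers.wait that the stacklight failover tests use, assuming `from devops.helpers import helpers`:

def wait_vip_migrated(core_actions, vip, old_minion, timeout=60, interval=5):
    """Poll until the keepalived VIP is reported on a different minion."""
    def vip_moved():
        try:
            return core_actions.get_keepalived_vip_minion_id(vip) != old_minion
        except Exception:
            # keepalived may be mid-failover; treat as "not migrated yet"
            return False
    helpers.wait(vip_moved, interval=interval, timeout=timeout,
                 timeout_msg="VIP {0} was not migrated from {1}"
                             .format(vip, old_minion))
    return core_actions.get_keepalived_vip_minion_id(vip)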
diff --git a/tcp_tests/tests/system/test_failover_nodes.py b/tcp_tests/tests/system/test_failover_nodes.py
index 87a7de8..1f628c4 100644
--- a/tcp_tests/tests/system/test_failover_nodes.py
+++ b/tcp_tests/tests/system/test_failover_nodes.py
@@ -139,7 +139,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_restart_mon01_node(self, openstack_actions, hardware, underlay,
                                 sl_os_deployed, show_step):
         """Test restart mon01
@@ -190,7 +190,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_warm_shutdown_mon01_node(self, underlay, hardware, sl_os_deployed,
                                       openstack_actions, show_step):
         """Test warm shutdown mon01
@@ -237,10 +237,10 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_restart_mon_with_vip(self, underlay, hardware, sl_os_deployed,
                                   openstack_actions, salt_actions,
-                                  common_services_actions, show_step):
+                                  core_actions, show_step):
         """Test restart mon with VIP
 
         Scenario:
@@ -256,7 +256,7 @@
 
         """
         # TR case #4753939
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
         salt = salt_actions
 
         # STEP #1,2,3
@@ -280,7 +280,7 @@
             tgt="mon0*",
             pillar="_param:cluster_vip_address")[0]
         vip = [vip for minion_id, vip in mon_vip_pillar.items()][0]
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        minion_vip = core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
 
         # STEP #6
@@ -290,13 +290,13 @@
         # STEP #7
         show_step(7)
         # Check that VIP has been actually migrated to a new node
-        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+        new_minion_vip = core_actions.get_keepalived_vip_minion_id(
             vip)
         LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
         assert new_minion_vip != minion_vip, (
             "VIP {0} wasn't migrated from {1} after node reboot!"
             .format(vip, new_minion_vip))
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
 
         # STEP #8
         show_step(8)
@@ -315,7 +315,7 @@
     @pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
     def test_restart_ctl_with_vip(self, underlay, hardware, openstack_deployed,
                                   openstack_actions, salt_actions,
-                                  common_services_actions, show_step):
+                                  core_actions, show_step):
         """Test restart clt with VIP
 
         Scenario:
@@ -330,7 +330,7 @@
 
         """
         # TR case #3385671
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
         salt = salt_actions
 
         # STEP #1,2,3
@@ -344,7 +344,7 @@
             tgt="I@nova:controller:enabled:True",
             pillar="_param:cluster_vip_address")[0]
         vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        minion_vip = core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
 
         # STEP #5
@@ -354,13 +354,13 @@
         # STEP #6
         show_step(6)
         # Check that VIP has been actually migrated to a new node
-        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+        new_minion_vip = core_actions.get_keepalived_vip_minion_id(
             vip)
         LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
         assert new_minion_vip != minion_vip, (
             "VIP {0} wasn't migrated from {1} after node reboot!"
             .format(vip, new_minion_vip))
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
 
         # STEP #7
         show_step(7)
diff --git a/tcp_tests/tests/system/test_failover_openstack_services.py b/tcp_tests/tests/system/test_failover_openstack_services.py
index 08a928b..d06c2af 100644
--- a/tcp_tests/tests/system/test_failover_openstack_services.py
+++ b/tcp_tests/tests/system/test_failover_openstack_services.py
@@ -96,7 +96,7 @@
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_restart_keepalived(self, func_name, underlay, config,
                                 openstack_deployed,
-                                common_services_actions,
+                                core_actions,
                                 salt_actions, openstack_actions,
                                 rally, show_step):
         """Test restart keepalived on ctl* nodes
@@ -114,7 +114,7 @@
             - OpenStack cluster
         """
         # TR case #4756965
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
         salt = salt_actions
 
         ctl_node_names = underlay.get_target_node_names(
@@ -165,7 +165,7 @@
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_stop_keepalived(self, func_name, underlay, config,
                              openstack_deployed,
-                             common_services_actions,
+                             core_actions,
                              salt_actions, openstack_actions,
                              rally, show_step):
         """Test stop keepalived on ctl node with VIP under load
@@ -184,7 +184,7 @@
             - OpenStack cluster
         """
         # TR case #3385682
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
         salt = salt_actions
 
         ctl_node_names = underlay.get_target_node_names(
@@ -201,7 +201,7 @@
             tgt="I@nova:controller:enabled:True",
             pillar="_param:cluster_vip_address")[0]
         vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        minion_vip = core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
 
         # STEP #2
@@ -252,7 +252,7 @@
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_kill_keepalived(self, func_name, underlay, config,
                              openstack_deployed,
-                             common_services_actions,
+                             core_actions,
                              salt_actions, openstack_actions,
                              rally, show_step):
         """Test kill keepalived and haproxy on ctl node with VIP under load
@@ -283,7 +283,7 @@
             - Salt cluster
             - OpenStack cluster
         """
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
         salt = salt_actions
 
         ctl_node_names = underlay.get_target_node_names(
@@ -301,7 +301,7 @@
             tgt="I@nova:controller:enabled:True",
             pillar="_param:cluster_vip_address")[0]
         vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        minion_vip = core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
 
         # STEP #2
@@ -362,13 +362,13 @@
         # STEP #6
         show_step(6)
         # Check that VIP has been actually migrated to a new node
-        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+        new_minion_vip = core_actions.get_keepalived_vip_minion_id(
             vip)
         LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
         assert new_minion_vip != minion_vip, (
             "VIP {0} wasn't migrated from {1} after killing keepalived!"
             .format(vip, new_minion_vip))
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
 
         # Haproxy case
         # STEP #7
@@ -431,7 +431,7 @@
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_kill_rabbit_galera(self, func_name, underlay, config,
                                 openstack_deployed,
-                                common_services_actions,
+                                core_actions,
                                 salt_actions, openstack_actions,
                                 rally, show_step):
         """Test kill rabbitmq and galera on ctl node with VIP under load
@@ -455,7 +455,7 @@
             - Salt cluster
             - OpenStack cluster
         """
-        common_services_actions.check_keepalived_pillar()
+        core_actions.check_keepalived_pillar()
         salt = salt_actions
 
         ctl_node_names = underlay.get_target_node_names(
@@ -476,7 +476,7 @@
             pillar="_param:cluster_vip_address")[0]
         vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
         ctl_minions = ctl_vip_pillar.keys()
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        minion_vip = core_actions.get_keepalived_vip_minion_id(vip)
         LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
 
         # STEP #2
@@ -533,7 +533,7 @@
 
         # Check haproxy status on the node with VIP and find the mysql backend
         # which is receiving the connections
-        haproxy_status = common_services_actions.get_haproxy_status(minion_vip)
+        haproxy_status = core_actions.get_haproxy_status(minion_vip)
         mysql_status = haproxy_status['mysql_cluster']
         mysql_tgt = ''
         scur = 0
diff --git a/tcp_tests/tests/system/test_failover_stacklight_services.py b/tcp_tests/tests/system/test_failover_stacklight_services.py
index 3bb47eb..4f4381f 100644
--- a/tcp_tests/tests/system/test_failover_stacklight_services.py
+++ b/tcp_tests/tests/system/test_failover_stacklight_services.py
@@ -24,10 +24,11 @@
     """Test class for testing OpenStack nodes failover"""
 
     @staticmethod
-    def check_influxdb_xfail(sl_deployed, node_name, value):
+    def check_influxdb_xfail(stacklight_deployed, node_name, value):
 
         def check_influxdb_data():
-            return value in sl_deployed.check_data_in_influxdb(node_name)
+            return value in stacklight_deployed.check_data_in_influxdb(
+                node_name)
 
         try:
             helpers.wait(
@@ -43,7 +44,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_kill_influxdb_relay_mon01_node(self, sl_os_deployed,
                                             show_step):
         """Test kill influxdb relay on mon01 node
@@ -111,7 +112,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_kill_influxdb_mon01_node(self, sl_os_deployed, show_step):
         """Test kill influxdb on mon01 node
 
@@ -177,7 +178,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_stop_influxdb_relay_mon_nodes(self, sl_os_deployed,
                                            show_step):
         """Test stop influxdb relay on mon01 node
@@ -246,7 +247,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
     def test_stop_influxdb_mon_nodes(self, sl_os_deployed, show_step):
         """Test stop influxdb on mon01 node
 
diff --git a/tcp_tests/tests/system/test_install_cookied_ocata.py b/tcp_tests/tests/system/test_install_cookied_ocata.py
index a6d2313..7aa41b2 100644
--- a/tcp_tests/tests/system/test_install_cookied_ocata.py
+++ b/tcp_tests/tests/system/test_install_cookied_ocata.py
@@ -60,7 +60,7 @@
     @pytest.mark.fail_snapshot
     def test_cookied_ocata_cicd_oss_install(self, underlay, salt_actions,
                                             openstack_deployed,
-                                            oss_deployed, sl_deployed,
+                                            oss_deployed, stacklight_deployed,
                                             show_step):
         """Test for deploying an mcp environment and check it
         Scenario:
@@ -87,7 +87,7 @@
                                  'monitoring_remote_collector',
                                  'monitoring_pushgateway']
         show_step(6)
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
         show_step(7)
@@ -98,14 +98,15 @@
             # InfluxDB is used if prometheus relay service is not installed
             expected_service_list.append('monitoring_remote_storage_adapter')
 
-        sl_deployed.check_docker_services(mon_nodes, expected_service_list)
+        stacklight_deployed.check_docker_services(mon_nodes,
+                                                  expected_service_list)
 
         show_step(8)
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         show_step(9)
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
@@ -113,7 +114,7 @@
 
         show_step(10)
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index ac4d1d1..ec90863 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -29,8 +29,8 @@
     @pytest.mark.cz8116
     @pytest.mark.k8s_calico_sl
     def test_k8s_install_calico_lma(self, config, show_step,
-                                    k8s_deployed, k8s_actions,
-                                    sl_deployed, sl_actions):
+                                    k8s_deployed,
+                                    stacklight_deployed):
         """Test for deploying MCP with k8s+stacklight_calico and check it
 
         Scenario:
@@ -48,6 +48,8 @@
 
         """
         # STEP #5
+        # k8s_actions = k8s_deployed
+        sl_actions = stacklight_deployed
         show_step(5)
         k8sclient = k8s_deployed.api
         assert k8sclient.nodes.list() is not None, "Can not get nodes list"
@@ -85,7 +87,7 @@
                 'Mandatory metric {0} is missing in {1}'.format(
                     metric, res.text)
 
-        prometheus_client = sl_deployed.api
+        prometheus_client = stacklight_deployed.api
         try:
             current_targets = prometheus_client.get_targets()
             LOG.debug('Current targets after install {0}'
@@ -112,14 +114,14 @@
             # with acceptance criteria
         show_step(10)
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
@@ -128,8 +130,8 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.cz8115
     def test_k8s_install_contrail_lma(self, config, show_step,
-                                      k8s_deployed, k8s_actions,
-                                      sl_deployed, sl_actions):
+                                      k8s_deployed,
+                                      stacklight_deployed):
         """Test for deploying MCP with k8s+stacklight+contrail and check it
 
         Scenario:
@@ -142,12 +144,14 @@
             7. Optionally run k8s e2e conformance
 
         """
+        k8s_actions = k8s_deployed
+        sl_actions = stacklight_deployed
         # STEP #5
         show_step(5)
         k8sclient = k8s_deployed.api
         assert k8sclient.nodes.list() is not None, "Can not get nodes list"
 
-        prometheus_client = sl_deployed.api
+        prometheus_client = stacklight_deployed.api
         try:
             current_targets = prometheus_client.get_targets()
             LOG.debug('Current targets after install {0}'
@@ -163,20 +167,20 @@
             current_targets = prometheus_client.get_targets()
             LOG.debug('Current targets after install {0}'
                       .format(current_targets))
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
         show_step(6)
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
 
@@ -196,7 +200,7 @@
     @pytest.mark.cz8116
     @pytest.mark.k8s_calico
     def test_only_k8s_install(self, config, show_step,
-                              k8s_deployed, k8s_actions, k8s_logs):
+                              k8s_deployed, k8s_logs):
         """Test for deploying MCP environment with k8s and check it
 
         Scenario:
@@ -207,6 +211,7 @@
             5. Run conformance if need
 
         """
+        k8s_actions = k8s_deployed
         if config.k8s.k8s_conformance_run:
             show_step(5)
             k8s_actions.run_conformance()
diff --git a/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py b/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py
index 9579139..4f78c0d 100644
--- a/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py
+++ b/tcp_tests/tests/system/test_install_mcp11_ovs_ocata.py
@@ -52,7 +52,7 @@
     @pytest.mark.cz8119
     def test_mcp11_ocata_ovs_sl_install(self, underlay, config,
                                         openstack_deployed,
-                                        sl_deployed):
+                                        stacklight_deployed):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -64,20 +64,20 @@
         7. Run SL component tests
         8. Download SL component tests report
         """
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
@@ -110,7 +110,7 @@
     @pytest.mark.cz8120
     def test_mcp11_ocata_dvr_sl_install(self, underlay, config,
                                         openstack_deployed,
-                                        sl_deployed):
+                                        stacklight_deployed):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -123,20 +123,20 @@
         8. Download SL component tests report
         """
 
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index d21eca7..eca7a20 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -52,7 +52,7 @@
     @pytest.mark.pike_ovs_sl
     def test_mcp_pike_ovs_sl_install(self, underlay, config,
                                      openstack_deployed,
-                                     sl_deployed):
+                                     stacklight_deployed):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -64,20 +64,20 @@
         7. Run SL component tests
         8. Download SL component tests report
         """
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
@@ -110,7 +110,7 @@
     @pytest.mark.pike_ovs_dvr_sl
     def test_mcp_pike_dvr_sl_install(self, underlay, config,
                                      openstack_deployed,
-                                     sl_deployed):
+                                     stacklight_deployed):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -123,20 +123,20 @@
         8. Download SL component tests report
         """
 
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
@@ -167,7 +167,7 @@
     def test_mcp_pike_cookied_ovs_install(self, underlay,
                                           openstack_deployed,
                                           openstack_actions,
-                                          sl_deployed,
+                                          stacklight_deployed,
                                           tempest_actions):
         """Test for deploying an mcp environment and check it
         Scenario:
@@ -192,7 +192,7 @@
                                           underlay,
                                           openstack_deployed,
                                           openstack_actions,
-                                          sl_deployed,
+                                          stacklight_deployed,
                                           tempest_actions):
         """Test for deploying an mcp environment and check it
         Scenario:
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
index d416875..6e5452c 100644
--- a/tcp_tests/tests/system/test_install_mcp_sl_os.py
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -48,7 +48,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_mcp_sl_os_install(self, underlay, config, openstack_deployed,
-                               sl_deployed, openstack_actions):
+                               stacklight_deployed, openstack_actions):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -60,20 +60,20 @@
         7. Run SL component tests
         8. Download SL component tests report
         """
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_trusty.py b/tcp_tests/tests/system/test_install_mcp_trusty.py
index 8c91faa..c86c79c 100644
--- a/tcp_tests/tests/system/test_install_mcp_trusty.py
+++ b/tcp_tests/tests/system/test_install_mcp_trusty.py
@@ -49,7 +49,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_mcp_trusty_sl_os_install(self, underlay, config,
-                                      openstack_deployed, sl_deployed,
+                                      openstack_deployed, stacklight_deployed,
                                       openstack_actions):
         """Test for deploying an mcp environment and check it
         Scenario:
@@ -62,20 +62,20 @@
         7. Run SL component tests
         8. Download SL component tests report
         """
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
-        sl_deployed.check_prometheus_targets(mon_nodes)
+        stacklight_deployed.check_prometheus_targets(mon_nodes)
 
         # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests/prometheus',
             'test_alerts.py')
 
         # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_opencontrail.py b/tcp_tests/tests/system/test_install_opencontrail.py
index 74b5d2b..efc37c8 100644
--- a/tcp_tests/tests/system/test_install_opencontrail.py
+++ b/tcp_tests/tests/system/test_install_opencontrail.py
@@ -27,7 +27,8 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.with_rally(rally_node="ctl01.")
     def test_opencontrail_simple(self, config, underlay, salt_deployed,
-                                 openstack_deployed, sl_deployed, show_step):
+                                 openstack_deployed, stacklight_deployed,
+                                 show_step):
         """Runner for Juniper contrail-tests
 
         Scenario:
@@ -50,14 +51,14 @@
             # Run SL component tests
         if settings.RUN_SL_TESTS:
             show_step(5)
-            sl_deployed.run_sl_functional_tests(
+            stacklight_deployed.run_sl_functional_tests(
                 'ctl01',
                 '/root/stacklight-pytest/stacklight_tests/',
                 'tests/prometheus',
                 'test_alerts.py')
             show_step(8)
             # Download report
-            sl_deployed.download_sl_test_report(
+            stacklight_deployed.download_sl_test_report(
                 'ctl01',
                 '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
@@ -65,7 +66,8 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.with_rally(rally_node="ctl01.")
     def test_opencontrail3_maas(self, config, underlay, salt_actions,
-                                openstack_deployed, show_step, sl_deployed):
+                                openstack_deployed, show_step,
+                                stacklight_deployed):
         """Runner for Juniper contrail-tests
 
         Scenario:
@@ -94,7 +96,7 @@
                                  'monitoring_alertmanager',
                                  'monitoring_remote_collector',
                                  'monitoring_pushgateway']
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
         prometheus_relay_enabled = salt_actions.get_pillar(
@@ -104,18 +106,19 @@
             # InfluxDB is used if prometheus relay service is not installed
             expected_service_list.append('monitoring_remote_storage_adapter')
         show_step(6)
-        sl_deployed.check_docker_services(mon_nodes, expected_service_list)
+        stacklight_deployed.check_docker_services(mon_nodes,
+                                                  expected_service_list)
         # Run SL component tests
         if settings.RUN_SL_TESTS:
             show_step(7)
-            sl_deployed.run_sl_functional_tests(
+            stacklight_deployed.run_sl_functional_tests(
                 'ctl01',
                 '/root/stacklight-pytest/stacklight_tests/',
                 'tests/prometheus',
                 'test_alerts.py')
             show_step(8)
             # Download report
-            sl_deployed.download_sl_test_report(
+            stacklight_deployed.download_sl_test_report(
                 'ctl01',
                 '/root/stacklight-pytest/stacklight_tests/report.xml')
 
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 83f3766..44b82f0 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -162,7 +162,7 @@
         LOG.info("*************** DONE **************")
 
     def test_deploy_day1(self, show_step, config, underlay, hardware,
-                         common_services_deployed, salt_deployed):
+                         core_deployed, salt_deployed):
         """Test for deploying an mcp from day01 images
 
         Scenario:
diff --git a/tcp_tests/tests/system/test_oss_install.py b/tcp_tests/tests/system/test_oss_install.py
index f62f1c8..15bdf3e 100644
--- a/tcp_tests/tests/system/test_oss_install.py
+++ b/tcp_tests/tests/system/test_oss_install.py
@@ -26,7 +26,7 @@
     @pytest.mark.fail_snapshot
     def test_oss_install_default(self, underlay, show_step,
                                  oss_deployed, openstack_deployed,
-                                 sl_deployed):
+                                 stacklight_deployed):
         """Test for deploying an OSS environment and check it
 
         Scenario:
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
index 81e5e8b..c98db38 100644
--- a/tcp_tests/tests/system/test_pipeline_deploy.py
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -16,6 +16,7 @@
 from tcp_tests.managers.jenkins.client import JenkinsClient
 
 from tcp_tests import logger
+from tcp_tests import settings
 
 LOG = logger.logger
 
@@ -26,7 +27,7 @@
 
     @pytest.mark.fail_snapshot
     def test_pipeline(self, show_step, underlay,
-                      common_services_deployed, salt_deployed):
+                      core_deployed, salt_deployed):
         """Runner for Juniper contrail-tests
 
         Scenario:
@@ -73,4 +74,40 @@
                                     build_id=build[1])['result']
         assert result == 'SUCCESS', "Deploy CICD was failed"
 
+    @pytest.mark.fail_snapshot
+    def test_pipeline_dpdk(self, show_step, underlay,
+                           salt_deployed, tempest_actions):
+        """Deploy bm via pipeline
+
+        Scenario:
+            1. Prepare salt on hosts.
+            .........................
+        """
+        nodes = underlay.node_names()
+        LOG.info("Nodes - {}".format(nodes))
+        cfg_node = 'cfg01.cookied-bm-mcp-ovs-dpdk.local'
+        salt_api = salt_deployed.get_pillar(
+            cfg_node, '_param:jenkins_salt_api_url')
+        salt_api = salt_api[0].get(cfg_node)
+        jenkins = JenkinsClient(
+            host='http://172.16.49.2:8081',
+            username='admin',
+            password='r00tme')
+
+        # Create the parameter list for the 'deploy_openstack' job
+        params = jenkins.make_defults_params('deploy_openstack')
+        params['SALT_MASTER_URL'] = salt_api
+        params['STACK_INSTALL'] = 'core,kvm,cicd,ovs,openstack'
+        show_step(4)
+        build = jenkins.run_build('deploy_openstack', params)
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 4)
+        result = jenkins.build_info(name=build[0],
+                                    build_id=build[1])['result']
+        assert result == 'SUCCESS', "Deploy openstack was failed"
+
+        if settings.RUN_TEMPEST:
+            tempest_actions.prepare_and_run_tempest()
         LOG.info("*************** DONE **************")
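
test_pipeline_dpdk hard-codes the Jenkins endpoint and credentials for the cookied-bm-mcp-ovs-dpdk lab. A hedged sketch of making them overridable (the environment variable names are assumptions, not existing tcp_tests.settings options):

import os

# Defaults match the hard-coded values above; override via the environment.
JENKINS_HOST = os.environ.get('JENKINS_HOST', 'http://172.16.49.2:8081')
JENKINS_USER = os.environ.get('JENKINS_USER', 'admin')
JENKINS_PASS = os.environ.get('JENKINS_PASS', 'r00tme')

jenkins = JenkinsClient(host=JENKINS_HOST,
                        username=JENKINS_USER,
                        password=JENKINS_PASS)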
diff --git a/tcp_tests/tests/system/test_upgrade_stacklight.py b/tcp_tests/tests/system/test_upgrade_stacklight.py
index e946185..afad0c8 100644
--- a/tcp_tests/tests/system/test_upgrade_stacklight.py
+++ b/tcp_tests/tests/system/test_upgrade_stacklight.py
@@ -25,7 +25,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_upgrade_stacklight(self, underlay, config,
-                                hardware, sl_actions, sl_deployed):
+                                hardware, sl_actions, stacklight_deployed):
         """Runner
 
         Scenario:
@@ -42,19 +42,19 @@
         sl_actions.install(commands, label='Upgrade SL services')
         hardware.create_snapshot(name='sl_v1_upgraded')
 
-        mon_nodes = sl_deployed.get_monitoring_nodes()
+        mon_nodes = stacklight_deployed.get_monitoring_nodes()
         LOG.debug('Mon nodes list {0}'.format(mon_nodes))
 
 
 # Run SL component tests
-        sl_deployed.run_sl_functional_tests(
+        stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
             'tests',
             'tests/prometheus')
 
 # Download report
-        sl_deployed.download_sl_test_report(
+        stacklight_deployed.download_sl_test_report(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/report.xml')
         LOG.info("*************** DONE **************")
diff --git a/tcp_tests/utils/env_salt b/tcp_tests/utils/env_salt
index 12c4575..4e21f85 100755
--- a/tcp_tests/utils/env_salt
+++ b/tcp_tests/utils/env_salt
@@ -10,7 +10,9 @@
 CURRENT_DIR=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
 export PYTHONPATH=${CURRENT_DIR}/../..
 
-export SALT_MASTER_IP=${SALT_MASTER_IP:-$(for node in $(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo $node|grep cfg01|cut -d',' -f2; done)}
+export SALT_MASTER_INFO=$(for node in $(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo $node|grep cfg01; done)
+export SALT_MASTER_HOSTNAME=${SALT_MASTER_HOSTNAME:-$(echo $SALT_MASTER_INFO|cut -d',' -f1)}
+export SALT_MASTER_IP=${SALT_MASTER_IP:-$(echo $SALT_MASTER_INFO|cut -d',' -f2)}
 
 if [ -z "$SALT_MASTER_IP" ]; then
     echo "SALT_MASTER_IP not found in the environment '${ENV_NAME}'"