Merge "Revert "[newton] WORKAROUND PROD-23354""
diff --git a/README.md b/README.md
index 0f0ebd4..fdf0ebb 100644
--- a/README.md
+++ b/README.md
@@ -35,8 +35,8 @@
 The LAB_CONFIG_NAME variable maps a cluster name from the model repository
 to the set of templates in the ./tcp_tests/templates/ folder.
 ```
-export LAB_CONFIG_NAME=virtual-mcp-ocata-dvr  # OVS-DVR with ocata packages
-export LAB_CONFIG_NAME=virtual-mcp-ocata-ovs  # OVS-NO-DVR with ocata packages
+export LAB_CONFIG_NAME=cookied-mcp-ocata-dvr  # OVS-DVR with ocata packages
+export LAB_CONFIG_NAME=cookied-mcp-ocata-ovs  # OVS-NO-DVR with ocata packages
 export LAB_CONFIG_NAME=virtual-mcp-ocata-cicd  # Operational Support System Tools
 export LAB_CONFIG_NAME=virtual-mcp11-dvr  # OVS-DVR with neutron packages
 export LAB_CONFIG_NAME=virtual-mcp11-ovs  # OVS-NO-DVR with neutron packages
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 55dda48..9ceea67 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -21,12 +21,12 @@
 
         stage("Install core infrastructure and deploy CICD nodes") {
             // steps: env.DRIVETRAIN_STACK_INSTALL
-            shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+            shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT)
         }
 
         stage("Deploy platform components") {
             // steps: env.PLATFORM_STACK_INSTALL
-            shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+            shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
         }
 
         currentBuild.result = 'SUCCESS'
@@ -92,8 +92,18 @@
                 dos.py destroy ${ENV_NAME} || true
             """)
         }
-        // report results to testrail
-        shared.swarm_testrail_report(steps)
+
+        stage("Archive all xml reports") {
+            archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
+        }
+        stage("Report results to TestRail") {
+            shared.swarm_testrail_report(steps)
+        }
+        stage("Store TestRail reports to job description") {
+            def String description = readFile("description.txt")
+            currentBuild.description += "\n${description}"
+        }
+
     }
   }
 }
\ No newline at end of file
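
The new "Archive all xml reports" stage collects files with the Ant-style pattern set `**/*.xml,**/*.ini,**/*.log,**/*.tar.gz`. As a rough illustration only (not part of the pipeline), a recursive glob in Python matches approximately the same files; the workspace path here is hypothetical:

```
from pathlib import Path

# Ant-style "**/*.ext" corresponds roughly to a recursive glob in Python.
PATTERNS = ("*.xml", "*.ini", "*.log", "*.tar.gz")

def matched_artifacts(workspace="."):
    """Return the files under `workspace` that the archive stage would pick up."""
    root = Path(workspace)
    found = set()
    for pattern in PATTERNS:
        # rglob("*.tar.gz") matches names ending in ".tar.gz" at any depth
        found.update(root.rglob(pattern))
    return sorted(found)

for path in matched_artifacts():  # "." stands in for the Jenkins workspace
    print(path)
```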
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 64c8783..36ea29a 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -24,6 +24,8 @@
 
 @Library('tcp-qa')_
 
+import groovy.xml.XmlUtil
+
 common = new com.mirantis.mk.Common()
 shared = new com.mirantis.system_qa.SharedPipeline()
 
@@ -38,81 +40,90 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
-        try {
-            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
-                println "Remove environment ${ENV_NAME}"
-                shared.run_cmd("""\
-                    dos.py erase ${ENV_NAME} || true
-                """)
-                println "Remove config drive ISO"
-                shared.run_cmd("""\
-                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
-                """)
-            }
+        stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+            println "Remove environment ${ENV_NAME}"
+            shared.run_cmd("""\
+                dos.py erase ${ENV_NAME} || true
+            """)
+            println "Remove config drive ISO"
+            shared.run_cmd("""\
+                rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+            """)
+        }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
             }
+        }
 
-            stage("Create an environment ${ENV_NAME} in disabled state") {
-                // deploy_hardware.xml
-                shared.run_cmd("""\
-                    export ENV_NAME=${ENV_NAME}
-                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
-                    export MANAGER=devops
-                    export PYTHONIOENCODING=UTF-8
-                    export REPOSITORY_SUITE=${MCP_VERSION}
-                    export TEST_GROUP=test_create_environment
-                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
-                """)
-            }
+        stage("Create an environment ${ENV_NAME} in disabled state") {
+            // deploy_hardware.xml
+            shared.run_cmd("""\
+                export ENV_NAME=${ENV_NAME}
+                export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                export MANAGER=devops
+                export PYTHONIOENCODING=UTF-8
+                export REPOSITORY_SUITE=${MCP_VERSION}
+                export TEST_GROUP=test_create_environment
+                py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+            """)
+        }
 
-            stage("Generate the model") {
-                shared.generate_cookied_model()
-            }
+        stage("Generate the model") {
+            shared.generate_cookied_model()
+        }
 
-            stage("Generate config drive ISO") {
-                shared.generate_configdrive_iso()
-            }
+        stage("Generate config drive ISO") {
+            shared.generate_configdrive_iso()
+        }
 
-            stage("Upload generated config drive ISO into volume on cfg01 node") {
-                shared.run_cmd("""\
-                    # Get SALT_MASTER_HOSTNAME to determine the volume name
-                    . ./tcp_tests/utils/env_salt
-                    virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
-                    virsh pool-refresh --pool default
-                """)
-            }
+        stage("Upload generated config drive ISO into volume on cfg01 node") {
+            shared.run_cmd("""\
+                # Get SALT_MASTER_HOSTNAME to determine the volume name
+                . ./tcp_tests/utils/env_salt
+                virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+                virsh pool-refresh --pool default
+            """)
+        }
 
-            stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+        stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+            def xml_report_name = "deploy_salt.xml"
+            try {
                 // deploy_salt.xml
-                shared.run_cmd("""\
+                shared.run_sh("""\
                     export ENV_NAME=${ENV_NAME}
                     export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
                     export MANAGER=devops
                     export SHUTDOWN_ENV_ON_TEARDOWN=false
-                    export BOOTSTRAP_TIMEOUT=1200
+                    export BOOTSTRAP_TIMEOUT=1800
                     export PYTHONIOENCODING=UTF-8
                     export REPOSITORY_SUITE=${MCP_VERSION}
                     export TEST_GROUP=test_bootstrap_salt
-                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_salt.xml -k \${TEST_GROUP}
-                    sleep 60  # wait for jenkins to start and IO calm down
+                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
                 """)
-            }
+                // Wait for Jenkins to start and for IO to calm down
+                sleep(60)
 
-          } catch (e) {
-              common.printMsg("Job is failed", "purple")
-              throw e
-          } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for salt cluster
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
+            } catch (e) {
+                  common.printMsg("SaltStack cluster deployment failed", "purple")
+                  if (fileExists(xml_report_name)) {
+                      shared.download_logs("deploy_salt")
+                      def String junit_report_xml = readFile(xml_report_name)
+                      def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+                      throw new Exception(junit_report_xml_pretty)
+                  } else {
+                      throw e
+                  }
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for salt cluster
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
+                }
             }
         }
     }
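
When the salt bootstrap fails, the pipeline now prefers to raise the junit report content itself (pretty-printed via groovy.xml.XmlUtil) so the parent job sees the actual test failure instead of a generic error. A minimal Python sketch of the same idea, assuming the `deploy_salt.xml` report name used above:

```
import os
import xml.dom.minidom

def junit_as_exception_text(report_path="deploy_salt.xml"):
    """Pretty-print a junit report so it can serve as an exception message,
    similar in spirit to Groovy's XmlUtil().serialize()."""
    if not os.path.exists(report_path):
        raise RuntimeError("salt bootstrap failed and produced no junit report")
    with open(report_path) as f:
        raw = f.read()
    return xml.dom.minidom.parseString(raw).toprettyxml(indent="  ")

# In the failure handler: raise RuntimeError(junit_as_exception_text())
```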
diff --git a/jobs/pipelines/swarm-create-cfg-config-drive.groovy b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
index c43fb56..72d278f 100644
--- a/jobs/pipelines/swarm-create-cfg-config-drive.groovy
+++ b/jobs/pipelines/swarm-create-cfg-config-drive.groovy
@@ -1,5 +1,6 @@
 import java.text.SimpleDateFormat
 
+def gerrit = new com.mirantis.mk.Gerrit()
 def dateFormat = new SimpleDateFormat("yyyyMMddHHmm")
 def date = new Date()
 def common_scripts_commit = "${COMMON_SCRIPTS_COMMIT}"
@@ -31,28 +32,109 @@
         step([$class: 'WsCleanup'])
     }
 
-    stage("Get scripts") {
-      // apt package genisoimage is required for this stage
-      // download create-config-drive
+    stage("Get mk-pipelines, pipeline-library and mcp-common-scripts repositories") {
+        def cloned = true
+        withCredentials([[$class: 'SSHUserPrivateKeyBinding',
+                          keyFileVariable: "GERRIT_KEY",
+                          credentialsId: env.GERRIT_MCP_CREDENTIALS_ID,
+                          usernameVariable: "GERRIT_USERNAME",
+                          passwordVariable: "GERRIT_PASSWORD"]]) {
 
-      def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${common_scripts_commit}/config-drive/create_config_drive.sh"
-      sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+            dir("mcp-common-scripts-git") {
+                cloned = gerrit.gerritPatchsetCheckout([
+                    credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+                    gerritBranch: "${MCP_VERSION}",
+                    gerritRefSpec: "${MCP_COMMON_SCRIPTS_REFS}",
+                    gerritScheme: "ssh",
+                    gerritName: "${GERRIT_USERNAME}",
+                    gerritHost: "gerrit.mcp.mirantis.net",
+                    gerritPort: "29418",
+                    gerritProject: "mcp/mcp-common-scripts"
+                ])
+            }
+            if (!cloned) {
+                error("Failed to clone the repository mcp/mcp-common-scripts")
+            }
 
-      def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${common_scripts_commit}/config-drive/master_config.yaml"
-      sh "wget -O user_data ${user_data_script_url}"
+            sh ("""\
+                set -ex
+                eval \$(ssh-agent)
+                ssh-add ${GERRIT_KEY}
+                git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mk/mk-pipelines mk-pipelines
+                git clone --mirror ssh://${GERRIT_USERNAME}@gerrit.mcp.mirantis.net:29418/mcp-ci/pipeline-library pipeline-library
+            """)
+
+            if (PIPELINE_LIBRARY_REF != '') {
+                sh ("""\
+                    set -ex
+                    eval \$(ssh-agent)
+                    ssh-add ${GERRIT_KEY}
+                    cd pipeline-library
+                    git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF}
+                    git tag ${MCP_VERSION} FETCH_HEAD -f
+                """)
+            }
+            if (MK_PIPELINES_REF != '') {
+                sh ("""\
+                    set -ex
+                    eval \$(ssh-agent)
+                    ssh-add ${GERRIT_KEY}
+                    cd mk-pipelines
+                    git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF}
+                    git tag ${MCP_VERSION} FETCH_HEAD -f
+                """)
+            }
+
+            // dir("mk-pipelines-git") {
+            //     cloned = gerrit.gerritPatchsetCheckout([
+            //         credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+            //         gerritRefSpec: "${MK_PIPELINES_REF}",
+            //         gerritScheme: "ssh",
+            //         gerritName: "${GERRIT_USERNAME}",
+            //         gerritHost: "gerrit.mcp.mirantis.net",
+            //         gerritPort: "29418",
+            //         gerritProject: "mk/mk-pipelines"
+            //     ])
+            // }
+            // if (!cloned) {
+            //     error("Failed to clone the repository mk/mk-pipelines")
+            // }
+
+            // dir("pipeline-library-git") {
+            //     cloned = gerrit.gerritPatchsetCheckout([
+            //         credentialsId : "${GERRIT_MCP_CREDENTIALS_ID}",
+            //         gerritRefSpec: "${PIPELINE_LIBRARY_REF}",
+            //         gerritScheme: "ssh",
+            //         gerritName: "${GERRIT_USERNAME}",
+            //         gerritHost: "gerrit.mcp.mirantis.net",
+            //         gerritPort: "29418",
+            //         gerritProject: "mcp-ci/pipeline-library"
+            //     ])
+            // }
+            // if (!cloned) {
+            //     error("Failed to clone the repository mcp-ci/pipeline-library")
+            // }
+        }
+        //if (PIPELINE_LIBRARY_REF != '') {
+        //   sh "cd pipeline-library; git tag ${MCP_VERSION} FETCH_HEAD -f ; cd .."
+        //}
+        //if (MK_PIPELINES_REF != '') {
+        //   sh "cd mk-pipelines; git tag ${MCP_VERSION} FETCH_HEAD -f; cd .."
+        //}
+
+        // gerrit.gerritPatchsetCheckout() doesn't support cloning a bare repository
+        // sh "git clone --mirror mk-pipelines-git mk-pipelines"
+        // sh "git clone --mirror pipeline-library-git pipeline-library"
     }
 
-    stage("Clone mk-pipelines and pipeline-library") {
-        sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git -b ${MCP_VERSION} mk-pipelines"
-        sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git -b ${MCP_VERSION} pipeline-library"
-        if (PIPELINE_LIBRARY_REF != '') {
-           sh "cd pipeline-library; git fetch https://gerrit.mcp.mirantis.net/mcp-ci/pipeline-library ${PIPELINE_LIBRARY_REF} ; git tag ${MCP_VERSION} FETCH_HEAD -f ; cd .."
-        }
-        if (MK_PIPELINES_REF != '') {
-           sh "cd mk-pipelines; git fetch https://gerrit.mcp.mirantis.net/mcp-ci/mk-pipelines ${MK_PIPELINES_REF} ; git tag ${MCP_VERSION} FETCH_HEAD -f; cd .."
-        }
+    stage("Prepare arguments for config drive generation") {
+
+        config_drive_script_path = "mcp-common-scripts-git/config-drive/create_config_drive.sh"
+        user_data_script_path = "mcp-common-scripts-git/config-drive/master_config.yaml"
+        sh "chmod +x ${config_drive_script_path}"
+
         //args = "--user-data user_data --vendor-data user_data2 --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
-        args = "--user-data user_data2 --vendor-data user_data --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
+        args = "--user-data user_data2 --vendor-data ${user_data_script_path} --hostname cfg01 --model model --mk-pipelines mk-pipelines/ --pipeline-library pipeline-library/ ${iso_name}"
     }
 
     stage("Get cluster model") {
@@ -70,7 +152,7 @@
 
     stage("Set data"){
         for (i in entries(smc)) {
-            sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data"
+            sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" ${user_data_script_path}"
         }
     }
 
@@ -151,8 +233,8 @@
 
     stage("Create config-drive"){
       // create cfg config-drive
-      //sh "sed -i 's,config_dir/vendor-data,config_dir/user-data1,g' ./create-config-drive"
-      sh "./create-config-drive ${args}"
+      // apt package genisoimage is required for this stage
+      sh "./${config_drive_script_path} ${args}"
     }
 
     stage("Save artifacts") {
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 5ace2ca..58474b9 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -7,7 +7,8 @@
  *   PARENT_NODE_NAME              Name of the jenkins slave to create the environment
  *   PARENT_WORKSPACE              Path to the workspace of the parent job to use tcp-qa repo
  *   ENV_NAME                      Fuel-devops environment name
- *   STACK_INSTALL                 Stacks to install using Jenkins on cfg01 node: "core:1800,cicd:1800", where 1800 is timeout
+ *   STACK_INSTALL                 Stacks to install using Jenkins on cfg01 node: "core,cicd"
+ *   STACK_INSTALL_TIMEOUT         Stacks installation timeout, in seconds
  *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
  *   SHUTDOWN_ENV_ON_TEARDOWN      optional, shutdown fuel-devops environment at the end of the job
  *
@@ -24,12 +25,15 @@
 
 currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
 
-node ("${PARENT_NODE_NAME}") {
-    if (! fileExists("${PARENT_WORKSPACE}")) {
-        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
-    }
-    dir("${PARENT_WORKSPACE}") {
-        try {
+def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
+
+timeout(time: install_timeout + 600, unit: 'SECONDS') {
+
+    node ("${PARENT_NODE_NAME}") {
+        if (! fileExists("${PARENT_WORKSPACE}")) {
+            error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+        }
+        dir("${PARENT_WORKSPACE}") {
 
             if (! env.STACK_INSTALL) {
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
@@ -41,42 +45,36 @@
                 }
             }
 
-            // Install core and cicd
-            def stack
-            def timeout
-
-            for (element in "${env.STACK_INSTALL}".split(",")) {
-                if (element.contains(':')) {
-                    (stack, timeout) = element.split(':')
-                } else {
-                    stack = element
-                    timeout = '1800'
-                }
-                stage("Run Jenkins job on salt-master [deploy_openstack:${stack}]") {
-                    shared.run_job_on_day01_node(stack, timeout)
+            try {
+                // Install core and cicd
+                stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
+                    shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
                 }
 
-                stage("Sanity check the deployed component [${stack}]") {
-                    shared.sanity_check_component(stack)
-                }
+                for (stack in "${env.STACK_INSTALL}".split(",")) {
+                    stage("Sanity check the deployed component [${stack}]") {
+                        shared.sanity_check_component(stack)
+                    }
+                    stage("Make environment snapshot [${stack}_deployed]") {
+                        shared.devops_snapshot(stack)
+                    }
+                } // for
 
-                stage("Make environment snapshot [${stack}_deployed]") {
-                    shared.devops_snapshot(stack)
+            } catch (e) {
+                common.printMsg("Job failed", "purple")
+                shared.download_logs("deploy_drivetrain")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for cicd cluster
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
                 }
             }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for cicd cluster
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
-            }
-        }
-    }
-}
+        } // dir
+    } // node
+}
\ No newline at end of file
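
This file switches from the old per-stack timeout encoding ("core:1800,cicd:1800") to a plain stack list plus a single STACK_INSTALL_TIMEOUT, and wraps the whole node block in a Jenkins timeout of `install_timeout + 600` seconds. A small Python sketch contrasting the two formats under those assumptions:

```
def parse_old(stack_install):
    """Old format: 'core:1800,cicd:1800' -- optional per-stack timeout, default 1800."""
    for element in stack_install.split(","):
        stack, _, timeout = element.partition(":")
        yield stack, int(timeout or 1800)

def parse_new(stack_install, stack_install_timeout):
    """New format: 'core,cicd' plus one shared STACK_INSTALL_TIMEOUT."""
    stacks = stack_install.split(",")
    # The pipeline adds a 600 s grace period for the surrounding stages.
    job_timeout = int(stack_install_timeout) + 600
    return stacks, job_timeout

print(list(parse_old("core:1800,cicd:1800")))  # [('core', 1800), ('cicd', 1800)]
print(parse_new("core,cicd", "1800"))          # (['core', 'cicd'], 2400)
```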
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 9a6b1d1..c854c73 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -7,7 +7,8 @@
  *   PARENT_NODE_NAME              Name of the jenkins slave to create the environment
  *   PARENT_WORKSPACE              Path to the workspace of the parent job to use tcp-qa repo
  *   ENV_NAME                      Fuel-devops environment name
- *   STACK_INSTALL                 Stacks to install using Jenkins on CICD cluster: "openstack:3200,stacklight:2400", where 3200 and 2400 are timeouts
+ *   STACK_INSTALL                 Stacks to install using Jenkins on CICD cluster: "openstack,stacklight"
+ *   STACK_INSTALL_TIMEOUT         Stacks installation timeout, in seconds
  *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
  *   SHUTDOWN_ENV_ON_TEARDOWN      optional, shutdown fuel-devops environment at the end of the job
  *
@@ -24,12 +25,15 @@
 
 currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
 
-node ("${PARENT_NODE_NAME}") {
-    if (! fileExists("${PARENT_WORKSPACE}")) {
-        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
-    }
-    dir("${PARENT_WORKSPACE}") {
-        try {
+def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
+
+timeout(time: install_timeout + 600, unit: 'SECONDS') {
+
+    node ("${PARENT_NODE_NAME}") {
+        if (! fileExists("${PARENT_WORKSPACE}")) {
+            error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+        }
+        dir("${PARENT_WORKSPACE}") {
 
             if (! env.STACK_INSTALL) {
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
@@ -41,42 +45,36 @@
                 }
             }
 
-            // Install the cluster
-            def stack
-            def timeout
-
-            for (element in "${STACK_INSTALL}".split(",")) {
-                if (element.contains(':')) {
-                    (stack, timeout) = element.split(':')
-                } else {
-                    stack = element
-                    timeout = '1800'
-                }
-                stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
-                    shared.run_job_on_cicd_nodes(stack, timeout)
+            try {
+                // Install the cluster
+                stage("Run Jenkins job on CICD [deploy_openstack:${env.STACK_INSTALL}]") {
+                    shared.run_job_on_cicd_nodes(env.STACK_INSTALL, install_timeout)
                 }
 
-                stage("Sanity check the deployed component [${stack}]") {
-                    shared.sanity_check_component(stack)
-                }
+                for (stack in "${env.STACK_INSTALL}".split(",")) {
+                    stage("Sanity check the deployed component [${stack}]") {
+                        shared.sanity_check_component(stack)
+                    }
+                    stage("Make environment snapshot [${stack}_deployed]") {
+                        shared.devops_snapshot(stack)
+                    }
+                } // for
 
-                stage("Make environment snapshot [${stack}_deployed]") {
-                    shared.devops_snapshot(stack)
+            } catch (e) {
+                common.printMsg("Job failed", "purple")
+                shared.download_logs("deploy_platform")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for the installed stacks
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
                 }
             }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for the installed stacks
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
-            }
-        }
-    }
-}
+        } // dir
+    } // node
+}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 0dd2d7a..780229d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -16,6 +16,7 @@
  *   REPOSITORY_SUITE              Not used (backward compatibility, for manual deployment steps only)
  *   MCP_IMAGE_PATH1604            Not used (backward compatibility, for manual deployment steps only)
  *   IMAGE_PATH_CFG01_DAY01        Not used (backward compatibility, for manual deployment steps only)
+ *   TEMPEST_IMAGE_VERSION         Tempest image version: pike by default, can be queens.
  */
 
 @Library('tcp-qa')_
@@ -54,6 +55,7 @@
                 }
                 if (steps.contains('openstack')) {
                     sources += """
+                    export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
                     # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
                 }
                 def installed = steps.collect {"""\
@@ -72,6 +74,7 @@
                     """)
 
                 def snapshot_name = "test_completed"
+                shared.download_logs("test_completed")
                 shared.run_cmd("""\
                     dos.py suspend ${ENV_NAME}
                     dos.py snapshot ${ENV_NAME} ${snapshot_name}
@@ -86,6 +89,9 @@
 
         } catch (e) {
             common.printMsg("Job is failed", "purple")
+            // Downloading logs is usually not needed here because the tests
+            // are expected to use the @pytest.mark.grab_versions decorator
+            // shared.download_logs("test_failed")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index c43b3bb..3849e16 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -10,6 +10,7 @@
                                    where 3200 and 2400 might be timeouts (not used in the testing pipeline)
  *   PARENT_NODE_NAME              Name of the jenkins slave to create the environment
  *   PARENT_WORKSPACE              Path to the workspace of the parent job to use tcp-qa repo
+ *   TEMPEST_TEST_SUITE_NAME       Name of the Tempest test suite in TestRail
  *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
  */
 
@@ -30,6 +31,7 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
+        def description = ''
         try {
 
             if (env.TCP_QA_REFS) {
@@ -44,25 +46,31 @@
             def testrail_name_template = ''
             def reporter_extra_options = []
 
-            stage("Archive all xml reports") {
-                archiveArtifacts artifacts: "**/*.xml"
-            }
+            def report_result = ''
 
+            // deployment_report_name =~ "deployment_${ENV_NAME}.xml"
             def deployment_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"deployment_${ENV_NAME}.xml\"", returnStdout: true)
+            // tcpqa_report_name =~ "nosetests.xml"
             def tcpqa_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"nosetests.xml\"", returnStdout: true)
+            // tempest_report_name =~ "report_*.xml"
             def tempest_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"report_*.xml\"", returnStdout: true)
+            // k8s_conformance_report_name =~ conformance_result.xml
             def k8s_conformance_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_result.xml\"", returnStdout: true)
-            def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"stacklight_report.xml\"", returnStdout: true)
+            // k8s_conformance_virtlet_report_name =~ conformance_virtlet_result.xml
+            def k8s_conformance_virtlet_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_virtlet_result.xml\"", returnStdout: true)
+            // stacklight_report_name =~ "stacklight_report.xml" or "report.xml"
+            def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"*report.xml\"", returnStdout: true)
             common.printMsg(deployment_report_name ? "Found deployment report: ${deployment_report_name}" : "Deployment report not found", deployment_report_name ? "blue" : "red")
             common.printMsg(tcpqa_report_name ? "Found tcp-qa report: ${tcpqa_report_name}" : "tcp-qa report not found", tcpqa_report_name ? "blue" : "red")
             common.printMsg(tempest_report_name ? "Found tempest report: ${tempest_report_name}" : "tempest report not found", tempest_report_name ? "blue" : "red")
             common.printMsg(k8s_conformance_report_name ? "Found k8s conformance report: ${k8s_conformance_report_name}" : "k8s conformance report not found", k8s_conformance_report_name ? "blue" : "red")
+            common.printMsg(k8s_conformance_virtlet_report_name ? "Found k8s conformance virtlet report: ${k8s_conformance_virtlet_report_name}" : "k8s conformance virtlet report not found", k8s_conformance_virtlet_report_name ? "blue" : "red")
             common.printMsg(stacklight_report_name ? "Found stacklight-pytest report: ${stacklight_report_name}" : "stacklight-pytest report not found", stacklight_report_name ? "blue" : "red")
 
 
             if (deployment_report_name) {
                 stage("Deployment report") {
-//                    report_name = "deployment_${ENV_NAME}.xml"
                     testSuiteName = "[MCP] Integration automation"
                     methodname = '{methodname}'
                     testrail_name_template = '{title}'
@@ -71,13 +79,19 @@
                       "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
                       "--testrail-case-section-name \'All\'",
                     ]
-                    shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    report_result = shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
             if (tcpqa_report_name) {
                 stage("tcp-qa cases report") {
-                    // tcpqa_report_name =~ "nosetests.xml"
                     testSuiteName = "[MCP_X] integration cases"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
@@ -86,25 +100,35 @@
                       "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
                       "--testrail-case-section-name \'All\'",
                     ]
-                    shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    report_result = shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
             if ('openstack' in stacks && tempest_report_name) {
                 stage("Tempest report") {
-                    // tempest_report_name =~ "report_*.xml"
-                    testSuiteName = "[MCP1.1_PIKE]Tempest"
+                    testSuiteName = env.TEMPEST_TEST_SUITE_NAME
                     methodname = "{classname}.{methodname}"
                     testrail_name_template = "{title}"
-                    shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
+                    report_result = shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
+                    common.printMsg(report_result, "blue")
+                    report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
             if ('k8s' in stacks && k8s_conformance_report_name) {
                 stage("K8s conformance report") {
-                    // k8s_conformance_report_name =~ conformance_result.xml
-                    // TODO(ddmitriev): it's better to get the k8s version right after deployment
-                    // and store in some artifact that can be re-used here.
                     def k8s_version=shared.run_cmd_stdout("""\
                         . ./env_k8s_version;
                         echo "\$KUBE_SERVER_VERSION"
@@ -118,17 +142,52 @@
                       "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
                       "--testrail-case-section-name \'Conformance\'",
                     ]
-                    shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    report_result = shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
+                }
+            }
+
+            if ('k8s' in stacks && k8s_conformance_virtlet_report_name) {
+                stage("K8s conformance virtlet report") {
+                    testSuiteName = "[k8s] Virtlet"
+                    methodname = "{methodname}"
+                    testrail_name_template = "{title}"
+                    reporter_extra_options = [
+                      "--send-duplicates",
+                      "--testrail-add-missing-cases",
+                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+                      "--testrail-case-section-name \'Conformance\'",
+                    ]
+                    report_result = shared.upload_results_to_testrail(k8s_conformance_virtlet_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
             if ('stacklight' in stacks && stacklight_report_name) {
                 stage("stacklight-pytest report") {
-                    // stacklight_report_name =~ "stacklight_report.xml"
                     testSuiteName = "LMA2.0_Automated"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
-                    shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+                    report_result = shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+                    common.printMsg(report_result, "blue")
+                    report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
@@ -137,6 +196,7 @@
             throw e
         } finally {
             // executed even if reporting failed for some reason
+            writeFile(file: "description.txt", text: description, encoding: "UTF-8")
         }
     }
 }
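
Each report stage above scans the output returned by `upload_results_to_testrail()` for the line containing "[TestRun URL]" and turns its last token into an HTML link for the job description. The same extraction as a minimal Python sketch; the sample reporter output is illustrative:

```
def extract_testrail_link(report_result, suite_name):
    """Find the '[TestRun URL]' line in the reporter output and
    build the HTML link appended to the job description."""
    for line in report_result.splitlines():
        if "[TestRun URL]" in line:
            url = line.strip().split()[-1]  # the URL is the last token
            return "\n<a href={}>{}</a>".format(url, suite_name)
    return ""

sample = "uploading...\n[TestRun URL] https://testrail.example.com/runs/view/123\n"
print(extract_testrail_link(sample, "[MCP] Integration automation"))
```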
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index dcf05da..92b43b2 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -76,10 +76,23 @@
         parameters: parameters,
         propagate: false
 
+    def build_number = job_info.getNumber()
+    def build_url = job_info.getAbsoluteUrl()
+    def build_status = job_info.getResult()
+    try {
+        // Try to grab '*.tar.gz' artifacts from the downstream job
+        step($class: 'hudson.plugins.copyartifact.CopyArtifact',
+             projectName: job_name,
+             selector: specific("${build_number}"),
+             filter: "**/*.tar.gz",
+             target: '.',
+             flatten: true,
+             fingerprintArtifacts: true)
+    } catch (none) {
+        common.printMsg("No *.tar.gz files found in artifacts of the build ${build_url}", "purple")
+    }
+
     if (job_info.getResult() != "SUCCESS") {
-        def build_status = job_info.getResult()
-        def build_number = job_info.getNumber()
-        def build_url = job_info.getAbsoluteUrl()
         def job_url = "${build_url}"
         currentBuild.result = build_status
         if (junit_report_filename) {
@@ -168,7 +181,7 @@
         build_pipeline_job('swarm-bootstrap-salt-cluster-devops', parameters)
 }
 
-def swarm_deploy_cicd(String stack_to_install='core,cicd') {
+def swarm_deploy_cicd(String stack_to_install, String install_timeout) {
         // Run openstack_deploy job on cfg01 Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -177,13 +190,14 @@
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'STACK_INSTALL', value: stack_to_install),
+                string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
         build_pipeline_job('swarm-deploy-cicd', parameters)
 }
 
-def swarm_deploy_platform(String stack_to_install) {
+def swarm_deploy_platform(String stack_to_install, String install_timeout) {
         // Run openstack_deploy job on CICD Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -192,6 +206,7 @@
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'STACK_INSTALL', value: stack_to_install),
+                string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
@@ -202,6 +217,7 @@
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def tempest_image_version = env.TEMPEST_IMAGE_VERSION ?: 'pike'
         def parameters = [
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'PASSED_STEPS', value: passed_steps),
@@ -214,6 +230,8 @@
                 string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
                 string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
                 string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+                string(name: 'TEMPEST_IMAGE_VERSION', value: "${tempest_image_version}"),
             ]
         common.printMsg("Start building job 'swarm-run-pytest' with parameters:", "purple")
         common.prettyPrint(parameters)
@@ -225,6 +243,7 @@
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def tempest_test_suite_name = env.TEMPEST_TEST_SUITE_NAME
         def parameters = [
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
@@ -232,6 +251,7 @@
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                string(name: 'TEMPEST_TEST_SUITE_NAME', value: "${tempest_test_suite_name}"),
             ]
         common.printMsg("Start building job 'swarm-testrail-report' with parameters:", "purple")
         common.prettyPrint(parameters)
@@ -322,7 +342,9 @@
 
 def run_job_on_day01_node(stack_to_install, timeout=2400) {
     // stack_to_install="core,cicd"
+    def common = new com.mirantis.mk.Common()
     def stack = "${stack_to_install}"
+    common.printMsg("Deploy DriveTrain CICD components: ${stack_to_install}", "blue")
     try {
         run_cmd("""\
             export ENV_NAME=${ENV_NAME}
@@ -333,11 +355,12 @@
                 \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
                 \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
             }\"
-            JOB_PREFIX="[ ${ENV_NAME}/{build_number}:${stack} {time} ] "
+            JOB_PREFIX="[ ${ENV_NAME}/{build_number}:drivetrain {time} ] "
             python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
         """)
+        // Wait for IO to calm down on cluster nodes
+        sleep(60)
     } catch (e) {
-        def common = new com.mirantis.mk.Common()
         common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
         def workflow_details=run_cmd_stdout("""\
             . ./tcp_tests/utils/env_salt
@@ -352,7 +375,9 @@
 
 def run_job_on_cicd_nodes(stack_to_install, timeout=2400) {
     // stack_to_install="k8s,calico,stacklight"
+    def common = new com.mirantis.mk.Common()
     def stack = "${stack_to_install}"
+    common.printMsg("Deploy Platform components: ${stack_to_install}", "blue")
     try {
         run_cmd("""\
             export ENV_NAME=${ENV_NAME}
@@ -363,12 +388,12 @@
                 \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
                 \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
             }\"
-            JOB_PREFIX="[ ${ENV_NAME}/{build_number}:${stack} {time} ] "
+            JOB_PREFIX="[ ${ENV_NAME}/{build_number}:platform {time} ] "
             python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
-            sleep 60  # Wait for IO calm down on cluster nodes
         """)
+        // Wait for IO to calm down on cluster nodes
+        sleep(60)
     } catch (e) {
-        def common = new com.mirantis.mk.Common()
         common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
         def workflow_details=run_cmd_stdout("""\
             . ./tcp_tests/utils/env_salt
@@ -396,6 +421,17 @@
     }
 }
 
+def download_logs(archive_name_prefix) {
+    // Archive and download logs and debug info from salt nodes in the lab
+    // Do not fail on errors here, so that the original exception from the caller is not lost.
+    def common = new com.mirantis.mk.Common()
+    common.printMsg("Downloading nodes logs by ${archive_name_prefix}", "blue")
+    run_cmd("""\
+        export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+        ./tcp_tests/utils/get_logs.py --archive-name-prefix ${archive_name_prefix} || true
+    """)
+}
+
 def devops_snapshot_info(snapshot_name) {
     // Print helper message after snapshot
     def common = new com.mirantis.mk.Common()
@@ -477,6 +513,7 @@
   def testrailProject = "Mirantis Cloud Platform"
   def testPlanName = "[MCP-Q2]System-${MCP_VERSION}-${new Date().format('yyyy-MM-dd')}"
   def testrailMilestone = "MCP1.1"
+  def testrailCaseMaxNameLength = 250
   def jobURL = env.BUILD_URL
 
   def reporterOptions = [
@@ -493,6 +530,7 @@
     "--xunit-name-template \"${methodname}\"",
     "--testrail-name-template \"${testrail_name_template}\"",
     "--test-results-link \"${jobURL}\"",
+    "--testrail-case-max-name-lenght ${testrailCaseMaxNameLength}",  // flag spelling kept as used by the external reporter
   ] + reporter_extra_options
 
   def script = """
@@ -509,7 +547,7 @@
              passwordVariable: 'TESTRAIL_PASSWORD',
              usernameVariable: 'TESTRAIL_USER']
   ]) {
-    return run_cmd(script)
+    return run_cmd_stdout(script)
   }
 }
 
diff --git a/tcp_tests/fixtures/day1_fixtures.py b/tcp_tests/fixtures/day1_fixtures.py
index ff3a0b5..e223a2b 100644
--- a/tcp_tests/fixtures/day1_fixtures.py
+++ b/tcp_tests/fixtures/day1_fixtures.py
@@ -71,10 +71,6 @@
                     "region": {
                         "machines": macs}}}}
 
-        if not config.day1_underlay.lvm:
-            underlay.enable_lvm(hardware.lvm_storages())
-            config.day1_underlay.lvm = underlay.config_lvm
-
         hardware.create_snapshot(ext.SNAPSHOT.day1_underlay)
 
     else:
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index e581b86..409034e 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -77,16 +77,16 @@
         interfaces_pillar = k8s_actions._salt.get_pillar(
             tgt=tgt, pillar='linux:network:interface')[0]
 
-        for node_name, interfaces in interfaces_pillar.items():
+        for minion_id, interfaces in interfaces_pillar.items():
             for iface_name, iface in interfaces.items():
                 iface_name = iface.get('name', iface_name)
                 default_proto = 'static' if 'address' in iface else 'dhcp'
                 if iface.get('proto', default_proto) != 'dhcp':
                     LOG.warning('Trying to kill dhclient for iface {0} '
-                                'on node {1}'.format(iface_name, node_name))
+                                'on node {1}'.format(iface_name, minion_id))
                     underlay.check_call(
                         cmd='pkill -f "dhclient.*{}"'.format(iface_name),
-                        node_name=node_name, raise_on_err=False)
+                        node_name=minion_id, raise_on_err=False)
 
         LOG.warning('Restarting keepalived service on controllers...')
         k8s_actions._salt.local(tgt='ctl*', fun='cmd.run',
@@ -180,8 +180,11 @@
         if hasattr(request.node, 'rep_call') and \
                 (request.node.rep_call.passed or request.node.rep_call.failed)\
                 and cncf_publisher:
+            LOG.info("Waiting 60 sec for sonobuoy to generate results archive")
+            time.sleep(60)
+            LOG.info("Downloading sonobuoy results archive")
             files = utils.extract_name_from_mark(cncf_publisher) \
-                    or "{}".format(func_name)
+                or "{}".format(func_name)
             k8s_deployed.extract_file_to_node(
                 system='k8s', file_path='tmp/sonobuoy',
                 pod_name='sonobuoy', pod_namespace='heptio-sonobuoy'
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 7f4ce60..226ab22 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -71,13 +71,7 @@
         LOG.info("############ Executing command ####### {0}".format(commands))
         salt_actions.install(commands)
 
-        salt_nodes = salt_actions.get_ssh_data()
-        config.underlay.ssh = config.underlay.ssh + \
-            [node for node in salt_nodes
-             if not any(node['node_name'] == n['node_name']
-                        for n in config.underlay.ssh)]
-        underlay.config_ssh = []
-        underlay.add_config_ssh(config.underlay.ssh)
+        salt_actions.update_ssh_data_from_minions()
 
         hardware.create_snapshot(ext.SNAPSHOT.salt_deployed)
         salt_actions.sync_time()
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 65677a9..a3bcea4 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -200,10 +200,6 @@
         LOG.info("Config - {}".format(config))
         underlay_actions.add_config_ssh(config.underlay.ssh)
 
-        if not config.underlay.lvm:
-            underlay_actions.enable_lvm(hardware.lvm_storages())
-            config.underlay.lvm = underlay_actions.config_lvm
-
         hardware.create_snapshot(ext.SNAPSHOT.underlay)
 
         return underlay_actions
@@ -234,10 +230,6 @@
                     "region": {
                         "machines": macs}}}}
 
-        if not config.underlay.lvm:
-            underlay_actions.enable_lvm(hardware.lvm_storages())
-            config.underlay.lvm = underlay_actions.config_lvm
-
         for node in hardware.slave_nodes:
             # For correct commissioning by MaaS, nodes should be powered off
             node.destroy()
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
index dc58d9c..d3ba466 100644
--- a/tcp_tests/helpers/netchecker.py
+++ b/tcp_tests/helpers/netchecker.py
@@ -66,7 +66,7 @@
         else:
             assert self.get_connectivity_status().status_code == 400
 
-    def wait_check_network(self, works, timeout=60, interval=10):
+    def wait_check_network(self, works, timeout=600, interval=10):
         helpers.wait_pass(
             lambda: self.check_network(works=works),
             timeout=timeout,
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index f2311d4..480a646 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -356,6 +356,12 @@
 
         return var
 
+    def basename(path):
+        return os.path.basename(path)
+
+    def dirname(path):
+        return os.path.dirname(path)
+
     if options is None:
         options = {}
     options.update({'os_env': os_env, })
@@ -366,6 +372,9 @@
     environment = jinja2.Environment(
         loader=jinja2.FileSystemLoader([path, os.path.dirname(path)],
                                        followlinks=True))
+    environment.filters['basename'] = basename
+    environment.filters['dirname'] = dirname
+
     template = environment.get_template(filename).render(options)
 
     if required_env_vars and log_env_vars:
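
The new `basename` and `dirname` Jinja2 filters let templates derive file names and directories from paths passed in through options or environment variables. A short self-contained usage example:

```
import os
import jinja2

environment = jinja2.Environment()
environment.filters['basename'] = os.path.basename
environment.filters['dirname'] = os.path.dirname

template = environment.from_string(
    "image: {{ path | basename }} in {{ path | dirname }}")
print(template.render(path="/home/jenkins/images/cfg01.iso"))
# -> image: cfg01.iso in /home/jenkins/images
```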
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 4cd6c93..17ad452 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -94,39 +94,6 @@
             )
         self.__devops_config = conf
 
-    def lvm_storages(self):
-        """Returns a dict object of lvm storages in current environment
-
-        returned data example:
-            {
-                "master": {
-                    "id": "virtio-bff72959d1a54cb19d08"
-                },
-                "slave-0": {
-                    "id": "virtio-5e33affc8fe44503839f"
-                },
-                "slave-1": {
-                    "id": "virtio-10b6a262f1ec4341a1ba"
-                },
-            }
-
-        :rtype: dict
-        """
-        result = {}
-        for node in self.__env.get_nodes(role__in=ext.UNDERLAY_NODE_ROLES):
-            lvm = filter(lambda x: x.volume.name == 'lvm', node.disk_devices)
-            if len(lvm) == 0:
-                continue
-            lvm = lvm[0]
-            result[node.name] = {}
-            result_node = result[node.name]
-            result_node['id'] = "{bus}-{serial}".format(
-                bus=lvm.bus,
-                serial=lvm.volume.serial[:20])
-            LOG.info("Got disk-id '{}' for node '{}'".format(
-                result_node['id'], node.name))
-        return result
-
     @property
     def _d_env_name(self):
         """Get environment name from fuel devops config
@@ -162,6 +129,7 @@
         for d_node in self.__env.get_nodes(role__in=roles):
             ssh_data = {
                 'node_name': d_node.name,
+                'minion_id': d_node.name,
                 'roles': [d_node.role],
                 'address_pool': self._get_network_pool(
                     ext.NETWORK_TYPE.admin).address_pool.name,
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
index c4bb57e..39fd126 100644
--- a/tcp_tests/managers/envmanager_empty.py
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -30,26 +30,6 @@
         """
         self.__config = config
 
-    def lvm_storages(self):
-        """Returns data of lvm_storages on nodes in environment
-
-        It's expected that data of self.__config.lvm_storages will be
-        like this:
-            {
-                "node1": {
-                    "device": "vdb"
-                },
-                "node2": {
-                    "device": "vdb"
-                },
-                "node3": {
-                    "device": "vdb"
-                },
-            }
-        :rtype: dict
-        """
-        return self.__config.underlay.lvm
-
     def get_ssh_data(self, roles=None):
         raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
                         "Please provide SSH details in config.underlay.ssh")
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 193153c..6dcf615 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -115,6 +115,9 @@
                         failed += 1
                     if 'Minion did not return. [Not connected]' in s:
                         failed += 1
+                    if ('Salt request timed out. The master is not responding.'
+                            in s):
+                        failed += 1
                     if s.startswith("[CRITICAL]"):
                         failed += 1
                     if 'Fatal' in s:
@@ -132,8 +135,17 @@
 
                 if x == 1 and skip_fail is False:
                     # In the last retry iteration, raise an exception
-                    raise Exception("Step '{0}' failed"
-                                    .format(description))
+                    raise Exception("Step '{0}' failed:\n"
+                                    "=============== Command: ==============\n"
+                                    "{1}\n"
+                                    "=============== STDOUT: ===============\n"
+                                    "{2}\n"
+                                    "=============== STDERR: ===============\n"
+                                    "{3}\n"
+                                    .format(description,
+                                            cmd,
+                                            result.stdout_str,
+                                            result.stderr_str))
 
     def command2(self, step, msg):
         # Required fields
@@ -227,7 +239,8 @@
 
         result = {}
         with self.__underlay.local() as local:
-            result = local.execute('cd {0} && find . -type f -name "{1}"'
+            result = local.execute('cd {0} && find . -maxdepth 1 -type f'
+                                   ' -name "{1}"'
                                    .format(local_path, local_filename))
             LOG.info("Found files to upload:\n{0}".format(result))
 
@@ -271,7 +284,7 @@
 
         with self.__underlay.remote(node_name=node_name) as remote:
 
-            result = remote.execute('find {0} -type f -name {1}'
+            result = remote.execute('find {0} -maxdepth 1 -type f -name {1}'
                                     .format(remote_path, remote_filename))
             LOG.info("Found files to download:\n{0}".format(result))
 
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index d84451f..79974d3 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -112,6 +112,41 @@
         names.sort()
         return names[0]
 
+    @property
+    def controller_minion_id(self):
+        """ Return the minion id of the controller node used for all actions """
+        minion_ids = [minion_id['minion_id'] for minion_id in
+                      self.get_controllers()]
+        # we want to return the same controller every time
+        minion_ids.sort()
+        return minion_ids[0]
+
+    @property
+    def is_metallb_enabled(self):
+        ctl_tgt = self.controller_minion_id
+        LOG.debug("Controller target: {}".format(ctl_tgt))
+
+        result = self._salt.get_pillar(
+            tgt=ctl_tgt,
+            pillar='kubernetes:common:addons:metallb:enabled')
+        metallb = result[0].get(ctl_tgt, False)
+        LOG.info("{} kubernetes:common:addons:metallb:enabled: {}"
+                 .format(ctl_tgt, bool(metallb)))
+        return metallb
+
+    @property
+    def is_ingress_nginx_enabled(self):
+        ctl_tgt = self.controller_minion_id
+        LOG.debug("Controller target: {}".format(ctl_tgt))
+
+        result = self._salt.get_pillar(
+            tgt=ctl_tgt,
+            pillar='kubernetes:common:addons:ingress-nginx:enabled')
+        ingress_nginx = result[0].get(ctl_tgt, False)
+        LOG.info("{} kubernetes:common:addons:ingress-nginx:enabled: {}"
+                 .format(ctl_tgt, bool(ingress_nginx)))
+        return ingress_nginx
+
     def controller_check_call(self, cmd, **kwargs):
         """ Run command on controller and return result """
         LOG.info("running cmd on k8s controller: {}".format(cmd))
@@ -195,28 +230,29 @@
                raise_on_err=raise_on_err, verbose=True)
 
     def run_virtlet_conformance(self, timeout=60 * 120,
-                                log_file='virtlet_conformance.log'):
+                                log_file='virtlet_conformance.log',
+                                report_name="report.xml"):
         if self.__config.k8s.run_extended_virtlet_conformance:
             ci_image = "cloud-images.ubuntu.com/xenial/current/" \
                        "xenial-server-cloudimg-amd64-disk1.img"
             cmd = ("set -o pipefail; "
                    "docker run --net=host {0} /virtlet-e2e-tests "
-                   "-include-cloud-init-tests -junitOutput report.xml "
+                   "-include-cloud-init-tests -junitOutput {3} "
                    "-image {2} -sshuser ubuntu -memoryLimit 1024 "
                    "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
                    "-ginkgo.focus '\[Conformance\]' "
                    "| tee {1}".format(
                     self.__config.k8s_deploy.kubernetes_virtlet_image,
-                    log_file, ci_image))
+                    log_file, ci_image, report_name))
         else:
             cmd = ("set -o pipefail; "
                    "docker run --net=host {0} /virtlet-e2e-tests "
-                   "-junitOutput report.xml "
+                   "-junitOutput {2} "
                    "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
                    "-ginkgo.focus '\[Conformance\]' "
                    "| tee {1}".format(
                     self.__config.k8s_deploy.kubernetes_virtlet_image,
-                    log_file))
+                    log_file, report_name))
         LOG.info("Executing: {}".format(cmd))
         with self.__underlay.remote(
                 node_name=self.controller_name) as remote:
@@ -227,7 +263,7 @@
             LOG.info("Test results stderr: {}".format(stderr))
         return result
 
-    def start_k8s_cncf_verification(self, timeout=60 * 90):
+    def start_k8s_cncf_verification(self, timeout=60 * 180):
         """
             Build sonobuoy using golang docker image and install it in system
             Then generate sonobuoy verification manifest using gen command
@@ -259,7 +295,7 @@
         LOG.info("Waiting for CNCF to complete")
         helpers.wait(
             lambda: sonobuoy_status() == 'complete',
-            interval=30, timeout=timeout,
+            interval=120, timeout=timeout,
             timeout_msg="Timeout for CNCF reached."
         )
 
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 135a467..e7fc15c 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -15,9 +15,12 @@
 import json
 import os
 
+from devops.helpers import helpers
+
 from tcp_tests import logger
 from tcp_tests import settings
 
+
 LOG = logger.logger
 
 TEMPEST_CFG_DIR = '/tmp/test'
@@ -31,6 +34,7 @@
     container_name = 'run-tempest-ci'
     master_host = "cfg01"
     control_host = "ctl01"
+    compute_host = "cmp"
     class_name = "runtest"
     run_cmd = '/bin/bash -c "run-tempest"'
 
@@ -51,6 +55,9 @@
             self.master_host)[0]
         self.control_name = self.underlay.get_target_node_names(
             self.control_host)[0]
+        self.compute_name = self.underlay.get_target_node_names(
+            self.compute_host)[0]
+        self.barbican = False
 
     @property
     def salt_api(self):
@@ -64,8 +71,9 @@
         public_cidr = public_net["cidr"].encode("ascii")
         public_allocation_start = public_net["start"].encode("ascii")
         public_allocation_end = public_net["end"].encode("ascii")
+        tempest_test_target = self.target_name.encode("ascii") + "*"
 
-        return {
+        pillar = {
             'classes': ['service.runtest.tempest',
                         'service.runtest.tempest.public_net',
                         'service.runtest.tempest.services.manila.glance'],
@@ -80,7 +88,16 @@
                         public_allocation_start,
                     'openstack_public_neutron_subnet_allocation_end':
                         public_allocation_end,
-                    'tempest_test_target': self.target_name.encode("ascii"),
+                    'tempest_test_target': tempest_test_target,
+                    'glance_image_cirros_location':
+                        'http://cz8133.bud.mirantis.net:8099'
+                        '/cirros-0.3.5-x86_64-disk.img',
+                    'glance_image_fedora_location':
+                        'http://cz8133.bud.mirantis.net:8099'
+                        '/Fedora-Cloud-Base-27-1.6.x86_64.qcow2',
+                    'glance_image_manila_location':
+                        'http://cz8133.bud.mirantis.net:8099'
+                        '/manila-service-image-master.qcow2',
                 },
                 'neutron': {
                     'client': {
@@ -94,6 +111,8 @@
                         'enabled': True,
                         'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
                         'cfg_name': '${_param:runtest_tempest_cfg_name}',
+                        'put_keystone_rc_enabled': True,
+                        'put_local_image_file_enabled': False,
                         'DEFAULT': {
                             'log_file': 'tempest.log'
                         },
@@ -106,6 +125,9 @@
                                 '${_param:runtest_tempest_public_net}'
                             }
                         },
+                        'heat_plugin': {
+                            'build_timeout': '600'
+                        },
                         'share': {
                             'capability_snapshot_support': True,
                             'run_driver_assisted_migration_tests': False,
@@ -118,6 +140,11 @@
                             'run_snapshot_tests': True,
                         }}}}}
 
+        if self.barbican:
+            pillar['classes'].append('service.runtest.tempest.barbican')
+
+        return pillar
+
     def fetch_arficats(self, username=None, file_format='xml'):
         with self.underlay.remote(node_name=self.target_name,
                                   username=None) as tgt:
@@ -170,11 +197,26 @@
                                                indent=4, sort_keys=True)
                 f.write(container_inspect)
 
-    def prepare(self, dpdk=None):
+    def prepare(self):
+        barbican_pillar = "nova:controller:barbican:enabled"
+        result = self.__salt_api.get_pillar(tgt=self.control_name,
+                                            pillar=barbican_pillar)
+        self.barbican = result[0].get(self.control_name, False)
         self.store_runtest_model()
-
+        cirros_pillar = ("salt-call --out=newline_values_only "
+                         "pillar.get "
+                         "glance:client:identity:"
+                         "admin_identity:image:cirros:location")
+        dpdk_pillar = "linux:network:dpdk:enabled"
         salt_cmd = "salt -l info --hard-crash --state-output=mixed "
         salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "
+
+        result = self.__salt_api.get_pillar(tgt=self.compute_name,
+                                            pillar=dpdk_pillar)
+
+        dpdk = result[0].get(self.compute_name, False)
+        LOG.info("DPDK enabled: {}".format(bool(dpdk)))
+
         commands = [
             {
                 'description': "Sync salt objects for runtest model",
@@ -217,7 +259,7 @@
                 'cmd': ("set -ex;" +
                         salt_call_cmd + " state.sls nova.client")},
             {
-                'description': "Create cirros image for Tempest",
+                'description': "Upload images for Tempest",
                 'node_name': self.master_name,
                 'cmd': ("set -ex;" +
                         salt_call_cmd + " state.sls glance.client")},
@@ -226,6 +268,13 @@
                 'node_name': self.master_name,
                 'cmd': ("set -ex;" +
                         salt_call_cmd + " state.sls runtest")},
+            {
+                'description': "Upload cirros image",
+                'node_name': self.master_name,
+                'cmd': ("set -ex;"
+                        "cirros_url=$({}) && {} '{}' cmd.run "
+                        "\"wget $cirros_url -O /tmp/TestCirros-0.3.5.img\""
+                        .format(cirros_pillar, salt_cmd, self.target_name))},
         ]
 
         if dpdk:
@@ -235,8 +284,23 @@
                 'cmd': ("set -ex;" +
                         salt_call_cmd + " cmd.run "
                         " '. /root/keystonercv3;"
+                        "  openstack flavor set m1.extra_tiny_test"
+                        "  --property hw:mem_page_size=any;"
                         "  openstack flavor set m1.tiny_test"
-                        "  --property hw:mem_page_size=small'")},
+                        "  --property hw:mem_page_size=any'")},
+            )
+
+        if self.barbican:
+            commands.append({
+                'description': "Configure barbican",
+                'node_name': self.master_name,
+                'cmd': ("set -ex;" +
+                        salt_call_cmd +
+                        " state.sls barbican.client && " +
+                        salt_call_cmd +
+                        " state.sls runtest.test_accounts && " +
+                        salt_call_cmd +
+                        " state.sls runtest.barbican_sign_image")},
             )
 
         self.__salt_api.execute_commands(commands=commands,
@@ -247,13 +311,14 @@
         image_nameversion = "{}:{}".format(self.image_name, self.image_version)
 
         docker_args = (
+            " -t "
             " --name {container_name} "
             " -e ARGS=\"-r {tempest_pattern} -w {tempest_threads}\""
             " -v {cfg_dir}/tempest.conf:/etc/tempest/tempest.conf"
             " -v /tmp/:/tmp/"
             " -v {cfg_dir}:/root/tempest"
             " -v /etc/ssl/certs/:/etc/ssl/certs/"
-            " --rm"
+            " -d "
             " {image_nameversion} {run_cmd}"
             .format(
                 container_name=self.container_name,
@@ -280,6 +345,25 @@
         self.__salt_api.execute_commands(commands=commands,
                                          label="Run Tempest tests")
 
+        def wait_status(s):
+            inspect_res = self.salt_api.local(tgt,
+                                              'dockerng.inspect',
+                                              self.container_name)
+            if 'return' in inspect_res:
+                inspect = inspect_res['return']
+                inspect = inspect[0]
+                inspect = next(inspect.iteritems())[1]
+                status = inspect['State']['Status']
+
+                return status.lower() == s.lower()
+
+            return False
+
+        helpers.wait(lambda: wait_status('exited'),
+                     timeout=timeout,
+                     timeout_msg=('Tempest run did not finish '
+                                  'in {}s'.format(timeout)))
+
         inspect_res = self.salt_api.local(tgt,
                                           'dockerng.inspect',
                                           self.container_name)
@@ -298,12 +382,12 @@
         return {'inspect': inspect,
                 'logs': logs}
 
-    def prepare_and_run_tempest(self, username='root', dpdk=None):
+    def prepare_and_run_tempest(self, username='root'):
         """
         Run tempest tests
         """
         tempest_timeout = settings.TEMPEST_TIMEOUT
-        self.prepare(dpdk=dpdk)
+        self.prepare()
         test_res = self.run_tempest(tempest_timeout)
         self.fetch_arficats(username=username)
         self.save_runtime_logs(**test_res)
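
The container is now started detached (`-d`) instead of auto-removed (`--rm`), so the run must be awaited explicitly before logs and artifacts are fetched. A condensed sketch of that run-then-wait flow, reusing the `dockerng.inspect` polling from `wait_status` above (function name is illustrative):

```
from devops.helpers import helpers

# Illustrative condensation of the detached-run flow: poll dockerng.inspect
# until the single-minion result reports State.Status == 'exited', so the
# container (and its logs) still exists when artifacts are collected.
def wait_container_exited(salt_api, tgt, container_name, timeout):
    def exited():
        res = salt_api.local(tgt, 'dockerng.inspect', container_name)
        if 'return' not in res:
            return False
        inspect = next(iter(res['return'][0].items()))[1]
        return inspect['State']['Status'].lower() == 'exited'

    helpers.wait(exited, timeout=timeout,
                 timeout_msg='Tempest run did not finish in {}s'
                             .format(timeout))
```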
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 6fad0e4..a468b02 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -188,13 +188,14 @@
         if len(hosts) == 0:
             raise LookupError("Hosts is empty or absent")
 
-        def host(node_name, ip):
+        def host(minion_id, ip):
             return {
                 'roles': ['salt_minion'],
                 'keys': [
                     k['private'] for k in self.__config.underlay.ssh_keys
                 ],
-                'node_name': node_name,
+                'node_name': minion_id,
+                'minion_id': minion_id,
                 'host': ip,
                 'address_pool': pool_name,
                 'login': settings.SSH_NODE_CREDENTIALS['login'],
@@ -216,6 +217,25 @@
                        host_list={k: v['ipv4'] for k, v in hosts.items()}))
             raise StopIteration(msg)
 
+    def update_ssh_data_from_minions(self):
+        """Combine existing underlay.ssh with VCP salt minions"""
+        salt_nodes = self.get_ssh_data()
+
+        for salt_node in salt_nodes:
+            nodes = [n for n in self.__config.underlay.ssh
+                     if salt_node['host'] == n['host']
+                     and salt_node['address_pool'] == n['address_pool']]
+            if nodes:
+                # Assume that there can be only one node with such IP address
+                # Just update minion_id for this node
+                nodes[0]['minion_id'] = salt_node['minion_id']
+            else:
+                # New node, add to config.underlay.ssh
+                self.__config.underlay.ssh.append(salt_node)
+
+        self.__underlay.config_ssh = []
+        self.__underlay.add_config_ssh(self.__config.underlay.ssh)
+
     def service_status(self, tgt, service):
         result = self.local(tgt=tgt, fun='service.status', args=service)
         return result['return']
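
`update_ssh_data_from_minions()` merges by matching `(host, address_pool)`: an already-known node only gets its `minion_id` refreshed, while unknown hosts are appended as new entries. A self-contained sketch of that merge rule on plain dicts:

```
# Sketch of the merge rule in update_ssh_data_from_minions(): match records
# by (host, address_pool); refresh minion_id on a match, append otherwise.
def merge_ssh_data(existing, discovered):
    for salt_node in discovered:
        match = [n for n in existing
                 if n['host'] == salt_node['host']
                 and n['address_pool'] == salt_node['address_pool']]
        if match:
            # Assume at most one node per IP: only update its minion_id
            match[0]['minion_id'] = salt_node['minion_id']
        else:
            existing.append(salt_node)
    return existing
```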
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 3bb0a1f..f3ccef8 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -41,7 +41,6 @@
     def install(self, commands, label='Install SL services'):
         self.execute_commands(commands, label=label)
         self.__config.stack_light.stacklight_installed = True
-        self.__config.stack_light.sl_vip_host = self.get_sl_vip()
 
     def get_sl_vip(self):
         tgt = 'I@prometheus:server:enabled:True'
@@ -76,6 +75,7 @@
     @property
     def api(self):
         if self._p_client is None:
+            self.__config.stack_light.sl_vip_host = self.get_sl_vip()
             self._p_client = prometheus_client.PrometheusClient(
                 host=self.__config.stack_light.sl_vip_host,
                 port=self.__config.stack_light.sl_prometheus_port,
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index ee23654..66f686b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -39,6 +39,7 @@
           [
             {
               node_name: node1,
+              minion_id: node1.local,
               address_pool: 'public-pool01',
               host: ,
               port: ,
@@ -50,6 +51,7 @@
             },
             {
               node_name: node1,
+              minion_id: node1.local,
               address_pool: 'private-pool01',
               host:
               port:
@@ -61,6 +63,7 @@
             },
             {
               node_name: node2,
+              minion_id: node2.local,
               address_pool: 'public-pool01',
               keys_source_host: node1
               ...
@@ -75,7 +78,6 @@
     """
     __config = None
     config_ssh = None
-    config_lvm = None
 
     def __init__(self, config):
         """Read config.underlay.ssh object
@@ -86,9 +88,6 @@
         if self.config_ssh is None:
             self.config_ssh = []
 
-        if self.config_lvm is None:
-            self.config_lvm = {}
-
         self.add_config_ssh(self.__config.underlay.ssh)
 
     def add_config_ssh(self, config_ssh):
@@ -100,6 +99,7 @@
             ssh_data = {
                 # Required keys:
                 'node_name': ssh['node_name'],
+                'minion_id': ssh['minion_id'],
                 'host': ssh['host'],
                 'login': ssh['login'],
                 'password': ssh['password'],
@@ -126,6 +126,7 @@
             ssh_data = {
                 # Required keys:
                 'node_name': ssh['node_name'],
+                'minion_id': ssh['minion_id'],
                 'host': ssh['host'],
                 'login': ssh['login'],
                 'password': ssh['password'],
@@ -147,7 +148,7 @@
         return keys
 
     def __ssh_data(self, node_name=None, host=None, address_pool=None,
-                   node_role=None):
+                   node_role=None, minion_id=None):
 
         ssh_data = None
 
@@ -175,6 +176,16 @@
                             break
                     else:
                         ssh_data = ssh
+        elif minion_id is not None:
+            for ssh in self.config_ssh:
+                if minion_id == ssh['minion_id']:
+                    if address_pool is not None:
+                        if address_pool == ssh['address_pool']:
+                            ssh_data = ssh
+                            break
+                    else:
+                        ssh_data = ssh
+
         if ssh_data is None:
             LOG.debug("config_ssh - {}".format(self.config_ssh))
             raise Exception('Auth data for node was not found using '
@@ -191,41 +202,14 @@
                 names.append(ssh['node_name'])
         return names
 
-    def enable_lvm(self, lvmconfig):
-        """Method for enabling lvm oh hosts in environment
+    def minion_ids(self):
+        """Get list of minion ids registered in config.underlay.ssh"""
 
-        :param lvmconfig: dict with ids or device' names of lvm storage
-        :raises: devops.error.DevopsCalledProcessError,
-        devops.error.TimeoutError, AssertionError, ValueError
-        """
-        def get_actions(lvm_id):
-            return [
-                "systemctl enable lvm2-lvmetad.service",
-                "systemctl enable lvm2-lvmetad.socket",
-                "systemctl start lvm2-lvmetad.service",
-                "systemctl start lvm2-lvmetad.socket",
-                "pvcreate {} && pvs".format(lvm_id),
-                "vgcreate default {} && vgs".format(lvm_id),
-                "lvcreate -L 1G -T default/pool && lvs",
-            ]
-        lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"]
-        for node_name in self.node_names():
-            lvm = lvmconfig.get(node_name, None)
-            if not lvm:
-                continue
-            if 'id' in lvm:
-                lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])
-            elif 'device' in lvm:
-                lvmdevice = '/dev/{}'.format(lvm['device'])
-            else:
-                raise ValueError("Unknown LVM device type")
-            if lvmdevice:
-                self.apt_install_package(
-                    packages=lvmpackages, node_name=node_name, verbose=True)
-                for command in get_actions(lvmdevice):
-                    self.sudo_check_call(command, node_name=node_name,
-                                         verbose=True)
-        self.config_lvm = dict(lvmconfig)
+        ids = []  # List is used to keep the original order of ids
+        for ssh in self.config_ssh:
+            if ssh['minion_id'] not in ids:
+                ids.append(ssh['minion_id'])
+        return ids
 
     def host_by_node_name(self, node_name, address_pool=None):
         ssh_data = self.__ssh_data(node_name=node_name,
@@ -237,6 +221,11 @@
                                    address_pool=address_pool)
         return ssh_data['host']
 
+    def host_by_minion_id(self, minion_id, address_pool=None):
+        ssh_data = self.__ssh_data(minion_id=minion_id,
+                                   address_pool=address_pool)
+        return ssh_data['host']
+
     def remote(self, node_name=None, host=None, address_pool=None,
                username=None):
         """Get SSHClient by a node name or hostname.
@@ -438,12 +427,12 @@
             "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
             "docker service ls > "
             "  /root/\$(hostname -f)/dump_docker_services_ls.txt;"
-            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
             "  do docker service ps --no-trunc 2>&1 \$SERVICE >> "
             "    /root/\$(hostname -f)/dump_docker_service_ps.txt;"
             "  done;"
-            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
-            "  do docker service logs 2>&1 \$SERVICE > "
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+            "  do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
             "    /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
             "  done;"
             "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 9ebdf22..fca6a6d 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -77,7 +77,7 @@
     'docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest')  # noqa
 TEMPEST_IMAGE_VERSION = os.environ.get('TEMPEST_IMAGE_VERSION', 'pike')
 TEMPEST_PATTERN = os.environ.get('TEMPEST_PATTERN', 'tempest')
-TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 5))
+TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 6))
 TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
 TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
 SALT_VERSION = os.environ.get('SALT_VERSION', '2017.7')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 0361835..0a447d6 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -96,6 +96,7 @@
     ct.Cfg('ssh', ct.JSONList(),
            help="""SSH Settings for Underlay: [{
                   'node_name': node1,
+                  'minion_id': node1.local,
                   'roles': ['salt-master', 'salt-minion', ],
                   'host': hostname,
                   'login': login,
@@ -118,8 +119,6 @@
     ct.Cfg('upstream_dns_servers', ct.JSONList(),
            help="IP addresses of upstream DNS servers (dnsmasq)",
            default=[]),
-    ct.Cfg('lvm', ct.JSONDict(),
-           help="LVM settings for Underlay", default={}),
     ct.Cfg('address_pools', ct.JSONDict(),
            help="""Address pools (dynamically) allocated for the environment.
                    May be used to determine CIDR for a specific network from
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
index 0aebf89..fc35f88 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/core.yaml
@@ -1,118 +1,19 @@
 {% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
index 67833da..51dfc5d 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
@@ -42,9 +42,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
index 1e318a5..6617855 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
@@ -52,6 +52,13 @@
    - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
    - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
index 129693d..64f01fa 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/core.yaml
@@ -1,118 +1,19 @@
 {% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
index 3b508ce..31e9736 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
@@ -42,9 +42,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
index 1e318a5..6617855 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
@@ -52,6 +52,13 @@
    - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
    - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
index 2675136..34c254d 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/core.yaml
@@ -3,124 +3,20 @@
 {% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Setup glusterfs on primary controller
-  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the OpenStack control VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index f8a1d9a..e59fdf8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -32,9 +32,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -43,8 +43,8 @@
   cmd: |
     set -e;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
index 6c9e48f..cc69c64 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
@@ -47,37 +47,13 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-contrail40/core.yaml b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
index c815d86..21ab849 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/core.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/core.yaml
@@ -3,124 +3,20 @@
 {% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Setup glusterfs on primary controller
-  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the OpenStack control VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
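
The seven inline step blocks deleted above now live in shared-core.yaml as importable macros. As a sketch of what one of those macros plausibly contains — reconstructed from the deleted steps themselves, since shared-core.yaml is not part of this diff — MACRO_INSTALL_GLUSTERFS would look roughly like:

{%- macro MACRO_INSTALL_GLUSTERFS() %}
{# Reconstructed from the deleted inline steps; retry counts and delays are assumptions. #}
- description: Install glusterfs
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' state.sls glusterfs.server.service
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Setup glusterfs on primary controller
  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 30}
  skip_fail: false

- description: Check the gluster status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}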
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
index c4470e6..db9b61b 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
@@ -82,7 +82,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -207,7 +207,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index 9319634..3542e9b 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -32,9 +32,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -43,8 +43,8 @@
   cmd: |
     set -e;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
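
Note the VCP image source moving from https://apt.mcp.mirantis.net/images/ to http://images.mcp.mirantis.net/. If the new host were unreachable, salt.control would only fail much later, while spawning the VCP nodes; a pre-flight step in the same format as the surrounding templates could catch that early (an illustrative sketch, not part of this change):

- description: Check that the relocated image URL is reachable (illustrative sketch)
  cmd: |
    set -e;
    # -f makes curl fail on HTTP errors, -I sends a HEAD request so nothing is downloaded
    curl -sfI 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' | head -n1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true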
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
index 6c9e48f..6b6ec9f 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
@@ -47,37 +47,14 @@
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab

-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;

-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping

  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
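
This is the pattern repeated across the user-data files in this change: the commented-out apt/wget bootstrap is deleted for good, and cfg01 instead assumes salt-master and salt-minion are already preinstalled in the config-drive image, so cloud-init only has to enable them, start them, and smoke-test the local minion. As a standalone cloud-config sketch (the preinstalled-packages assumption is implied by the diff, not stated in it):

#cloud-config
runcmd:
 - mkdir -p /srv/salt/reclass/nodes              # reclass expects this directory even when empty
 - systemctl enable salt-master
 - systemctl enable salt-minion
 - systemctl start salt-master
 - systemctl start salt-minion
 - salt-call -l info --timeout=120 test.ping     # fails early in the boot log if minion<->master auth is broken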

diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
index 530a4e7..55d6d8d 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/core.yaml
@@ -1,117 +1,17 @@
 {% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
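
The refactoring pattern in these core.yaml files is plain Jinja2: import the shared template with the caller's context, then expand the macros in deployment order. The 'with context' clause matters — without it the imported macros could not see caller variables such as HOSTNAME_CFG01 and would render empty node_name fields:

{% import 'shared-core.yaml' as SHARED_CORE with context %}
{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}   {# expands to the same step list this file used to inline #}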
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 99ad264..ce13598 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -123,7 +123,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 10.167.11.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.11.11
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
index f3c2f61..459ab69 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -39,7 +39,7 @@
     [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
     . /root/venv-reclass-tools/bin/activate;
     pip install git+https://github.com/dis-xcom/reclass-tools;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
index 3f4f128..b77550a 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -51,16 +49,13 @@
    - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
similarity index 81%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
index 4ab0f03..2d79d55 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/core.yaml
@@ -1,8 +1,8 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Install support services
 - description: Sync all
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
similarity index 97%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
index 308051a..c505c58 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/k8s.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/k8s.yaml
@@ -1,5 +1,5 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CTL01 with context %}
 
 {%- macro MACRO_CHECK_SYSTEMCTL() %}
 {#######################################}
@@ -131,7 +131,7 @@
 
 - description: Run Kubernetes master setup
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
+     -C 'I@kubernetes:master' state.sls kubernetes.master.setup
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
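
Dropping 'and *01*' from the compound matcher widens the target: I@kubernetes:master is a pillar match for every master node, while the old expression intersected it with a hostname glob so kubernetes.master.setup ran only on the first master. A quick way to compare the two targets (illustrative, not part of the template):

- description: Show which minions each matcher targets (illustrative sketch)
  cmd: |
    salt -C 'I@kubernetes:master and *01*' test.ping;   # old target: the first master only
    salt -C 'I@kubernetes:master' test.ping             # new target: every master
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true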
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
similarity index 61%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
index 9cf1366..ad4e04a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/lab04-upgrade-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/lab04-upgrade-physical-inventory.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.bm-mcp-pike-k8s-contrail.local:
+    cfg01.bm-k8s-contrail.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -9,7 +9,7 @@
           role: single_dhcp
     # Physical nodes
 
-    kvm01.bm-mcp-pike-k8s-contrail.local:
+    kvm01.bm-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node01
       roles:
       - infra_kvm
@@ -20,7 +20,7 @@
         enp9s0f1:
           role: single_vlan_ctl
 
-    kvm02.bm-mcp-pike-k8s-contrail.local:
+    kvm02.bm-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node02
       roles:
       - infra_kvm
@@ -31,7 +31,7 @@
         enp9s0f1:
           role: single_vlan_ctl
 
-    kvm03.bm-mcp-pike-k8s-contrail.local:
+    kvm03.bm-k8s-contrail.local:
       reclass_storage_name: infra_kvm_node03
       roles:
       - infra_kvm
@@ -42,7 +42,7 @@
         enp9s0f1:
           role: single_vlan_ctl
 
-    ctl01.bm-mcp-pike-k8s-contrail.local:
+    ctl01.bm-k8s-contrail.local:
       reclass_storage_name: kubernetes_control_node01
       roles:
       - kubernetes_control_contrail
@@ -55,7 +55,7 @@
           role: single_vlan_ctl
           single_address: 10.167.8.239
 
-    ctl02.bm-mcp-pike-k8s-contrail.local:
+    ctl02.bm-k8s-contrail.local:
       reclass_storage_name: kubernetes_control_node02
       roles:
       - kubernetes_control_contrail
@@ -68,7 +68,7 @@
           role: single_vlan_ctl
           single_address: 10.167.8.238
 
-    ctl03.bm-mcp-pike-k8s-contrail.local:
+    ctl03.bm-k8s-contrail.local:
       reclass_storage_name: kubernetes_control_node03
       roles:
       - kubernetes_control_contrail
@@ -81,28 +81,39 @@
           role: single_vlan_ctl
           single_address: 10.167.8.237
 
-    cmp001.bm-mcp-pike-k8s-contrail.local:
-      reclass_storage_name: kubernetes_compute_node001
+    cmp<<count>>:
+      reclass_storage_name: kubernetes_compute_rack01
       roles:
-      - linux_system_codename_xenial
       - kubernetes_compute_contrail
+      - linux_system_codename_xenial
       - salt_master_host
       interfaces:
         enp9s0f0:
           role: single_dhcp
         ens11f1:
           role: k8s_oc40_only_vhost_on_control_vlan
-          single_address: 10.167.8.103
-
-    cmp002.bm-mcp-pike-k8s-contrail.local:
-      reclass_storage_name: kubernetes_compute_node002
-      roles:
-      - linux_system_codename_xenial
-      - kubernetes_compute_contrail
-      - salt_master_host
-      interfaces:
-        enp9s0f0:
-          role: single_dhcp
-        ens11f1:
-          role: k8s_oc40_only_vhost_on_control_vlan
-          single_address: 10.167.8.104
+          #    cmp001.bm-k8s-contrail.local:
+          #      reclass_storage_name: kubernetes_compute_node001
+          #      roles:
+          #      - linux_system_codename_xenial
+          #      - kubernetes_compute_contrail
+          #      - salt_master_host
+          #      interfaces:
+          #        enp9s0f0:
+          #          role: single_dhcp
+          #        ens11f1:
+          #          role: k8s_oc40_only_vhost_on_control_vlan
+          #          single_address: 10.167.8.103
+          #
+          #    cmp002.bm-k8s-contrail.local:
+          #      reclass_storage_name: kubernetes_compute_node002
+          #      roles:
+          #      - linux_system_codename_xenial
+          #      - kubernetes_compute_contrail
+          #      - salt_master_host
+          #      interfaces:
+          #        enp9s0f0:
+          #          role: single_dhcp
+          #        ens11f1:
+          #          role: k8s_oc40_only_vhost_on_control_vlan
+          #          single_address: 10.167.8.104
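
The two per-node cmp entries collapse into a single cmp<<count>> rack definition; the model generator expands the placeholder using kubernetes_compute_count and the address ranges added to the cookiecutter context in the next hunk. The pairing, with the values from this change (the expansion itself is produced by the generator, not written in either file):

# cookiecutter context (see salt-context-cookiecutter-k8s-contrail.yaml below):
kubernetes_compute_count: 2
kubernetes_compute_rack01_hostname: cmp
kubernetes_compute_single_address_ranges: 10.167.8.103-10.167.8.104
kubernetes_compute_tenant_address_ranges: 10.167.8.103-10.167.8.104
# expected expansion (sketch): cmp001 -> 10.167.8.103, cmp002 -> 10.167.8.104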
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
similarity index 95%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 0699684..c8fc345 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -107,9 +107,15 @@
   infra_primary_second_nic: eth2
   kubernetes_enabled: 'True'
   kubernetes_compute_count: 2
-  kubernetes_compute_rack01_single_subnet: 10.167.8
-  kubernetes_compute_rack01_tenant_subnet: 10.167.8
+  kubernetes_compute_rack01_hostname: cmp
+  kubernetes_compute_single_address_ranges: 10.167.8.103-10.167.8.104
+  kubernetes_compute_tenant_address_ranges: 10.167.8.103-10.167.8.104
   kubernetes_network_opencontrail_enabled: 'True'
+  kubernetes_keepalived_vip_interface: br_ctl
+  kubernetes_metallb_enabled: 'False'  # Not used with opencontrail
+  metallb_addresses: 172.17.41.160-172.17.41.180
+  kubernetes_ingressnginx_enabled: 'True'
+  kubernetes_ingressnginx_controller_replicas: 2
   local_repositories: 'False'
   maas_deploy_address: 172.16.49.66
   maas_deploy_range_end: 10.0.0.254
@@ -187,7 +193,6 @@
   stacklight_log_node02_hostname: log02
   stacklight_log_node03_address: 10.167.8.63
   stacklight_log_node03_hostname: log03
-  stacklight_long_term_storage_type: influxdb
   stacklight_monitor_address: 10.167.8.70
   stacklight_monitor_hostname: mon
   stacklight_monitor_node01_address: 10.167.8.71
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
similarity index 80%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
index 47e12c8..206dead 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt-context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    mon01.cookied-bm-mcp-ocata-contrail.local:
+    mon01.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_server_node01
       roles:
       - stacklightv2_server_leader
@@ -8,7 +8,7 @@
         ens3:
           role: single_ctl
 
-    mon02.cookied-bm-mcp-ocata-contrail.local:
+    mon02.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_server_node02
       roles:
       - stacklightv2_server
@@ -17,7 +17,7 @@
         ens3:
           role: single_ctl
 
-    mon03.cookied-bm-mcp-ocata-contrail.local:
+    mon03.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_server_node03
       roles:
       - stacklightv2_server
@@ -26,7 +26,7 @@
         ens3:
           role: single_ctl
 
-    mtr01.cookied-bm-mcp-ocata-contrail.local:
+    mtr01.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_telemetry_node01
       roles:
       - stacklight_telemetry
@@ -35,7 +35,7 @@
         ens3:
           role: single_ctl
 
-    mtr02.cookied-bm-mcp-ocata-contrail.local:
+    mtr02.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_telemetry_node02
       roles:
       - stacklight_telemetry
@@ -44,7 +44,7 @@
         ens3:
           role: single_ctl
 
-    mtr03.cookied-bm-mcp-ocata-contrail.local:
+    mtr03.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_telemetry_node03
       roles:
       - stacklight_telemetry
@@ -53,7 +53,7 @@
         ens3:
           role: single_ctl
 
-    log01.cookied-bm-mcp-ocata-contrail.local:
+    log01.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_log_node01
       roles:
       - stacklight_log_leader_v2
@@ -62,7 +62,7 @@
         ens3:
           role: single_ctl
 
-    log02.cookied-bm-mcp-ocata-contrail.local:
+    log02.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_log_node02
       roles:
       - stacklight_log
@@ -71,7 +71,7 @@
         ens3:
           role: single_ctl
 
-    log03.cookied-bm-mcp-ocata-contrail.local:
+    log03.bm-k8s-contrail.local:
       reclass_storage_name: stacklight_log_node03
       roles:
       - stacklight_log
@@ -80,7 +80,7 @@
         ens3:
           role: single_ctl
 
-    cid01.cookied-bm-mcp-ocata-contrail.local:
+    cid01.bm-k8s-contrail.local:
       reclass_storage_name: cicd_control_node01
       roles:
       - cicd_control_leader
@@ -89,7 +89,7 @@
         ens3:
           role: single_ctl
 
-    cid02.cookied-bm-mcp-ocata-contrail.local:
+    cid02.bm-k8s-contrail.local:
       reclass_storage_name: cicd_control_node02
       roles:
       - cicd_control_manager
@@ -98,7 +98,7 @@
         ens3:
           role: single_ctl
 
-    cid03.cookied-bm-mcp-ocata-contrail.local:
+    cid03.bm-k8s-contrail.local:
       reclass_storage_name: cicd_control_node03
       roles:
       - cicd_control_manager
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
similarity index 85%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
index 7ebda02..274fb44 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/salt.yaml
@@ -1,9 +1,9 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
@@ -30,22 +30,12 @@
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-- description: "Workaround for rack01 compute generator"
-  cmd: |
-    set -e;
-    # Remove rack01 key
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 - description: "Change path to internal storage for salt.control images"
   cmd: |
     set -e;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
similarity index 98%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
index 9dcb4f6..cb929e4 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
similarity index 82%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
index a73ca23..16bd9f6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -20,6 +20,7 @@
   bootcmd:
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
    - service sshd restart
   output:
     all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
@@ -29,6 +30,9 @@
    - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
    - sudo resolvconf -u
 
+   # Enable grub menu using updated config below
+   - update-grub
+
    # Prepare network connection
    - sudo ifdown ens3
    - sudo ip r d default || true  # remove existing default route to get it from dhcp
@@ -42,10 +46,14 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
 
-   # Enable grub menu using updated config below
-   - update-grub
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
similarity index 96%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
rename to tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
index 7832675..089f343 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-k8s-contrail/underlay.yaml
@@ -1,8 +1,8 @@
 # Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
 
 #{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-pike-k8s-contrail') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-k8s-contrail') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
@@ -22,10 +22,10 @@
 {% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '172.17.41.9') %}
 {% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '172.17.41.10') %}
 {% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '172.17.41.11') %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
-{% import 'cookied-bm-mcp-pike-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-k8s-contrail/underlay--user-data1604-hwe-compute.yaml' as CLOUDINIT_USER_DATA_HWE_CMP with context %}
 
 ---
 aliases:
@@ -38,7 +38,7 @@
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'cookied-bm-mcp-pike-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-k8s-contrail_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       admin-pool01:
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
index 6dc4829..a3de973 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
@@ -1,124 +1,11 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
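
For reference, MACRO_CHECK_VIP can be reconstructed almost verbatim from the step it replaces here (the actual body in shared-core.yaml may differ in details):

{%- macro MACRO_CHECK_VIP() %}
{# Reconstructed from the deleted inline step above. #}
- description: Check the VIP
  cmd: |
    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false
{%- endmacro %}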
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 7148d00..1d8cbbf 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -2,20 +2,13 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
 # Install OpenStack control services
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
 
 - description: Install cinder volume
@@ -26,10 +19,7 @@
   skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=true) }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
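
MACRO_INSTALL_COMPUTE now takes CELL_MAPPING=true, which suggests a macro with a Jinja default argument gating an extra cell-mapping step. A plausible shape, assuming the usual default-argument pattern and nova's cell_v2 host discovery — the real body in shared-openstack.yaml is not part of this diff:

{%- macro MACRO_INSTALL_COMPUTE(CELL_MAPPING=false) %}
{# ... common compute install steps elided ... #}
{%- if CELL_MAPPING %}
- description: Register compute hosts in nova cells (sketch; the exact state may differ)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@nova:controller and *01*' cmd.run 'nova-manage cell_v2 discover_hosts --verbose'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endif %}
{%- endmacro %}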
 
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index cb97e5e..7585c41 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,7 +1,7 @@
 default_context:
   mcp_version: proposed
   ceph_enabled: 'False'
-  cicd_enabled: 'True'
+  cicd_enabled: 'False'
   cicd_control_node01_address: 10.167.4.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.4.92
@@ -65,7 +65,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_compute_node01_hostname: cmp01
   openstack_compute_node02_hostname: cmp02
   openstack_compute_node01_address: 10.167.4.3
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index def5353..692cf19 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -44,34 +44,29 @@
         enp9s0f1:
           role: bond0_ab_ovs_vlan_ctl
 
-    cmp01.cookied-bm-mcp-dvr-vxlan.local:
+    cmp001.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_sdb
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
           role: single_mgm_dhcp
         enp9s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-          single_address: 10.167.4.105
-          tenant_address: 10.167.6.105
 
-
-    cmp02.cookied-bm-mcp-dvr-vxlan.local:
+    cmp002.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node02
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_sdb
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
           role: single_mgm_dhcp
         enp9s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-          single_address: 10.167.4.106
-          tenant_address: 10.167.6.106
 
     gtw01.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_gateway_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index 37d0b14..6cace03 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -4,6 +4,7 @@
       roles:
       - openstack_control_leader
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -15,6 +16,7 @@
       roles:
       - openstack_control
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -26,6 +28,7 @@
       roles:
       - openstack_control
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -119,57 +122,3 @@
           role: single_dhcp
         ens3:
           role: single_ctl
-
-    cid01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-#    mon01.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node01
-#      roles:
-#      - stacklightv2_server_leader
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    mon02.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node02
-#      roles:
-#      - stacklightv2_server
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#         role: single_ctl
-#
-#    mon03.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node03
-#      roles:
-#      - stacklightv2_server
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 81d9096..8804721 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -12,53 +12,19 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "WR for changing VCP images path to internal storage"
-  cmd: |
-    set -e;
-    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
-    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
-    . /root/venv-reclass-tools/bin/activate;
-    pip install git+https://github.com/dis-xcom/reclass-tools;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
 
-- description: Temporary workaround for removing cinder-volume from CTL nodes
-  cmd: |
-    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Temporary workaround for removing virtual gtw nodes
-  cmd: |
-    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
 - description: Rerun openssh after env model is generated
   cmd: |
     salt-call state.sls openssh
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
-  
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 ########################################
@@ -129,4 +95,3 @@
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index 3f4f128..b77550a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -51,16 +49,13 @@
    - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a7308e9..8d2bf09 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -6,8 +6,8 @@
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
 {% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
@@ -15,8 +15,8 @@
 {% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
 {% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
 {% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
-{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.164.3') %}
-{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.164.31') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.164.3') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.164.31') %}
 {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
 {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
 
@@ -48,8 +48,8 @@
             default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
             default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
             default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
-            default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
             default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
             default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
           ip_ranges:
@@ -311,14 +311,13 @@
                   parents:
                    - enp9s0f1
 
-
-          - name: {{ HOSTNAME_CMP01 }}
+          - name: {{ HOSTNAME_CMP001 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -344,9 +343,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
               network_config:
                 enp9s0f0:
                   networks:
@@ -359,15 +358,13 @@
                    - enp9s0f0
                    - enp9s0f1
 
-
-
-          - name: {{ HOSTNAME_CMP02 }}
+          - name: {{ HOSTNAME_CMP002 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -393,9 +390,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
               network_config:
                 enp9s0f0:
                   networks:
@@ -408,7 +405,6 @@
                    - enp9s0f0
                    - enp9s0f1
 
-
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
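
Renaming cmp01/cmp02 to the zero-padded cmp001/cmp002 also renames every os_env lookup in this template, so jobs must export the new variable names; the old *_CMP01/*_CMP02 exports are no longer read. For example (placeholder values, to be replaced with the lab's real IPMI hosts and MACs):

    # Hypothetical exports for the renamed compute nodes
    export IPMI_HOST_CMP001=<cmp001-ipmi-host-or-ip>
    export ETH0_MAC_ADDRESS_CMP001=<cmp001-eth0-mac>
    export ETH1_MAC_ADDRESS_CMP001=<cmp001-eth1-mac>
    export IPMI_HOST_CMP002=<cmp002-ipmi-host-or-ip>
    export ETH0_MAC_ADDRESS_CMP002=<cmp002-eth0-mac>
    export ETH1_MAC_ADDRESS_CMP002=<cmp002-eth1-mac>
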
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
index b7fcb07..b1e37c6 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/core.yaml
@@ -3,124 +3,13 @@
 {% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Setup glusterfs on primary controller
-  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the OpenStack control VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
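
The eight macro calls replace the inline steps with shared definitions from shared-core.yaml, so the support-service sequence is defined once and reused across labs. Judging from the steps they replace, each macro expands to salt state runs of the same shape; for keepalived, roughly (a sketch inferred from the deleted steps, not from the macro source):

    # First the *01* node, then the whole keepalived cluster
    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@keepalived:cluster and *01*' state.sls keepalived
    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@keepalived:cluster' state.sls keepalived
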
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
index afec74c..77980d0 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -33,9 +33,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -44,8 +44,8 @@
   cmd: |
     set -e;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
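
Two unrelated fixes in this hunk: the compute definitions are now edited in infra/config/init.yml (config.yml evidently became a config/ directory in newer cluster models), and the VCP images are fetched from images.mcp.mirantis.net instead of the apt host. A quick reachability check for the new image location (SUITE stands in for whatever REPOSITORY_SUITE resolves to):

    # HEAD request; fails fast if the image URL is wrong for the suite
    SUITE=proposed   # example value
    curl -fsSI "http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp${SUITE}.qcow2"
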
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
index 7677268..59a799e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
@@ -47,43 +47,14 @@
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
-   - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.8.0/24 -D
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
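
Besides the common bootstrap cleanup, this template also drops the sshuttle tunnel that cfg01 used to open into the control network. If that access is still needed while debugging, the tunnel can be brought up manually with the same parameters the old user-data used (the KVM01 address placeholder must be filled in by hand):

    # Forward the 10.167.8.0/24 control network through kvm01's deploy address
    sshuttle -r <ETH0_IP_ADDRESS_KVM01> 10.167.8.0/24 -D
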

diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
index 9971a9f..4dc3470 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/core.yaml
@@ -3,124 +3,13 @@
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Setup glusterfs on primary controller
-  cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the OpenStack control VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 1adfd90..24ee31f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -34,9 +34,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -45,8 +45,8 @@
   cmd: |
     set -e;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index 6c9e48f..cde8295 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
   expire: False
 
  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -47,38 +45,15 @@
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
     content: |
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
index e6dc270..4d9af8c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/core.yaml
@@ -1,117 +1,19 @@
 {% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 484b776..c137d12 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -123,7 +123,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 10.167.11.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.11.11
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
index 3f4f128..48bf712 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -46,21 +44,16 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
    - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
new file mode 100644
index 0000000..80073cf
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/core.yaml
@@ -0,0 +1,11 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..5aa9ebe
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/lab04-physical-inventory.yaml
@@ -0,0 +1,93 @@
+nodes:
+    cfg01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+          single_address: 10.167.8.99
+
+    # Physical nodes
+    kvm01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - features_lvm_backend_volume_sdb
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f1:
+          role: single_dhcp
+        enp5s0f0:
+          role: bond0_ab_contrail
+        enp5s0f1:
+          role: single_vlan_ctl
+
+#    cmp001.cookied-bm-oc40-queens.local:
+#      reclass_storage_name: openstack_compute_node01
+#      roles:
+#      - openstack_compute
+#      - features_lvm_backend_volume_sdb
+#      - linux_system_codename_xenial
+#      interfaces:
+#        enp2s0f1:
+#          role: single_mgm
+#          deploy_address: 172.16.49.73
+#        enp5s0f0:
+#          role: single_contrail_vlan_prv
+#          tenant_address: 192.168.0.101
+#        enp5s0f1:
+#          role: single_vlan_ctl
+#          single_address: 10.167.8.101
+#    cmp002.cookied-bm-oc40-queens.local:
+#      reclass_storage_name: openstack_compute_node02
+#      roles:
+#      - openstack_compute
+#      - features_lvm_backend_volume_sdb
+#      - linux_system_codename_xenial
+#      interfaces:
+#        enp2s0f1:
+#          role: single_mgm
+#          deploy_address: 172.16.49.74
+#        enp5s0f0:
+#          role: single_contrail_vlan_prv
+#          tenant_address: 192.168.0.102
+#        enp5s0f1:
+#          role: single_vlan_ctl
+#          single_address: 10.167.8.102
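
The active compute definition is the cmp<<count>> pattern bound to openstack_compute_rack01; the per-node cmp001/cmp002 blocks are kept only as commented reference. Given openstack_compute_count '2' and compute_padding_with_zeros 'True' in the cookiecutter context further down, the pattern should expand to the same two nodes the comments describe:

    # Expected expansion of cmp<<count>> (count=2, zero padding on)
    #   cmp001.cookied-bm-oc40-queens.local   ctl 10.167.8.101   deploy 172.16.49.73
    #   cmp002.cookied-bm-oc40-queens.local   ctl 10.167.8.102   deploy 172.16.49.74
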

diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
new file mode 100644
index 0000000..7dff4de
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/openstack.yaml
@@ -0,0 +1,287 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% set PATTERN = os_env('PATTERN', 'false') %}
+{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: Install cinder volume (workaround)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
+
+# Install OpenContrail
+
+- description: Install Docker services
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' match.pillar 'docker:host' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host' state.sls docker.host
+    fi; sleep 10;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install opencontrail database services on first minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install opencontrail database services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail control services on first minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail control services
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail collectors on first minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:collector and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail collectors
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 20}
+  skip_fail: false
+
+- description: Spawn Opencontrail docker images
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control or I@opencontrail:collector' state.sls docker.client && sleep 15;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Finalize opencontrail services on the first database node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: false
+
+- description: Finalize opencontrail services on client nodes (excluding computes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:client and not I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Finalize opencontrail services on compute nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: true
+
+- description: Check contrail status
+  cmd: sleep 15; salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Reboot computes
+  cmd: |
+    salt "cmp*" system.reboot;
+    sleep 600;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Remove crash files from /var/crashes/ left after vrouter crashes
+  cmd: salt "cmp*" cmd.run "rm -rf /var/crashes/*"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Apply Opencontrail client state on compute nodes
+  cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: false
+
+- description: Apply Opencontrail state on compute nodes
+  cmd: salt -C 'I@opencontrail:compute' state.sls opencontrail
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Check status for contrail services
+  cmd: |
+    sleep 15;
+    salt -C 'I@opencontrail:database' cmd.run 'doctrail all contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
+
+- description: Sync time on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Create heat-net before creating the external net
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create heat-net'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create public network for contrail
+  cmd: |
+    salt 'ntw01*' contrail.virtual_network_create public '{"external":true,"ip_prefix":"192.168.200.0","ip_prefix_len":24,"asn":64512,"target":10000}'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Create heat-subnet via neutron client (contrail)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create heat-net 10.20.30.0/24 --allocation-pool start=10.20.30.10,end=10.20.30.254 --gateway 10.20.30.1 --name heat-subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create heat-router via neutron client (contrail)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create heat-router'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set public network as gateway for heat-router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set heat-router public'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add heat-subnet interface to heat-router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add heat-router heat-subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Prepare and run tempest (runtest)
+
+- description: Upload tempest template
+  upload:
+    local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: runtest.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: Include class with tempest template into cfg node
+  cmd: |
+    sed -i 's/classes\:/classes\:\n- cluster.{{ LAB_CONFIG_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+    salt '*' saltutil.refresh_pillar;
+    salt '*' saltutil.sync_all;
+    salt 'ctl01*' pkg.install docker.io;
+    salt 'ctl01*' cmd.run 'iptables --policy FORWARD ACCEPT';
+    salt 'cfg01*' state.sls salt.minion && sleep 20;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Enforce keystone client
+  cmd: |
+    salt 'cfg01*' state.sls keystone.client;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Create flavors for tests
+  cmd: |
+    salt 'cfg01*' state.sls nova.client;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Upload cirros image
+  cmd: |
+    salt 'cfg01*' state.sls glance.client;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Generate tempest config
+  cmd: |
+    salt 'cfg01*' state.sls runtest;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Download cirros image for runtest
+  cmd: |
+    wget http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img -O /tmp/TestCirros-0.3.5.img
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run tempest from the versioned ci-tempest docker image
+  cmd: |
+    OPENSTACK_VERSION=`salt-call --out=newline_values_only pillar.get _param:openstack_version`;
+    docker run -e ARGS="-r test -w 2" -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /tmp/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION /bin/bash -c "run-tempest";
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Download xml results
+  download:
+    remote_path: /tmp/test/
+    remote_filename: "report_*.xml"
+    local_path: {{ os_env('PWD') }}
+  node_name: {{ HOSTNAME_CTL01 }}
+  skip_fail: true
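
The runtest chain ends by executing tempest inside the versioned ci-tempest image on ctl01 and pulling the XML reports back. ARGS is handed through to the container's run-tempest entrypoint; "-r test -w 2" presumably selects tests matching the regex 'test' with two workers. Re-running a narrower set by hand would look like this sketch (the regex value is illustrative only):

    OPENSTACK_VERSION=$(salt-call --out=newline_values_only pillar.get _param:openstack_version)
    docker run -e ARGS="-r tempest.api.network -w 2" \
        -v /tmp/test/tempest.conf:/etc/tempest/tempest.conf \
        -v /tmp/:/tmp/ -v /tmp/test:/root/tempest \
        -v /etc/ssl/certs/:/etc/ssl/certs/ \
        --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:$OPENSTACK_VERSION \
        /bin/bash -c "run-tempest"
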
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
new file mode 100644
index 0000000..f0d6d8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/runtest.yml
@@ -0,0 +1,47 @@
+classes:
+- service.runtest.tempest
+- service.runtest.tempest.public_net
+- service.runtest.tempest.services.manila.glance
+parameters:
+  _param:
+    glance_image_cirros_location: http://cz8133.bud.mirantis.net:8099/cirros-0.3.5-x86_64-disk.img
+    glance_image_fedora_location: http://cz8133.bud.mirantis.net:8099/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+    glance_image_manila_location: http://cz8133.bud.mirantis.net:8099/manila-service-image-master.qcow2
+    openstack_public_neutron_subnet_allocation_end: 192.168.200.220
+    openstack_public_neutron_subnet_allocation_start: 192.168.200.130
+    openstack_public_neutron_subnet_cidr: 192.168.200.0/24
+    openstack_public_neutron_subnet_gateway: 192.168.200.1
+    runtest_tempest_cfg_dir: /tmp/test
+    runtest_tempest_cfg_name: tempest.conf
+    runtest_tempest_public_net: public
+    tempest_test_target: ctl01*
+  neutron:
+    client:
+      enabled: true
+  runtest:
+    enabled: true
+    keystonerc_node: ctl01*
+    tempest:
+      DEFAULT:
+        log_file: tempest.log
+      cfg_dir: ${_param:runtest_tempest_cfg_dir}
+      cfg_name: ${_param:runtest_tempest_cfg_name}
+      compute:
+        min_compute_nodes: 2
+      convert_to_uuid:
+        network:
+          public_network_id: ${_param:runtest_tempest_public_net}
+      enabled: true
+      heat_plugin:
+        build_timeout: '600'
+      put_keystone_rc_enabled: false
+      put_local_image_file_enabled: false
+      share:
+        capability_snapshot_support: true
+        run_driver_assisted_migration_tests: false
+        run_manage_unmanage_snapshot_tests: false
+        run_manage_unmanage_tests: false
+        run_migration_with_preserve_snapshots_tests: false
+        run_quota_tests: true
+        run_replication_tests: false
+        run_snapshot_tests: true
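
This class glues tempest into the reclass model: the _param image URLs and public-subnet settings feed service.runtest.tempest, convert_to_uuid appears to make the runtest state substitute the UUID of the network named by runtest_tempest_public_net into tempest.conf, and the cfg_dir/cfg_name params place the generated config exactly where the docker step above mounts it:

    # After `salt 'cfg01*' state.sls runtest`, on the tempest_test_target node (ctl01):
    ls /tmp/test/tempest.conf   # ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}
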
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..bfcd153
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,257 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+    +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+    qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+    m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+    7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+    2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+    HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+    AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+    o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+    5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+    XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+    AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+    USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+    uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+    QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+    98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+    r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+    qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+    CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+    p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+    79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+    NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+    CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+    XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+    N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.8.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.8.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.8.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.8.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+    oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+    IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+    kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+    wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+    27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+    5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+    lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+    k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+    3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+    dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+    0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+    qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+    BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+    UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+    VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+    1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+    nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+    Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+    FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+    HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+    Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+    poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+    17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+    l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+  cluster_domain: cookied-bm-oc40-queens.local
+  cluster_name: cookied-bm-oc40-queens
+  opencontrail_version: 4.0
+  linux_repo_contrail_component: oc40
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2422'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.65
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: sgudz@mirantis.com
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.16.49.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.16.49.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.16.49.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kqueen_custom_mail_enabled: 'False'
+  kqueen_enabled: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_deploy_cidr: 172.16.49.64/26
+  maas_deploy_gateway: 172.16.49.65
+  maas_deploy_range_end: 172.16.49.119
+  maas_deploy_range_start: 172.16.49.77
+  maas_deploy_vlan: '0'
+  maas_dhcp_enabled: 'True'
+  maas_fabric_name: fabric-51
+  maas_hostname: cfg01
+  maas_manage_deploy_network: 'True'
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: 10.167.8.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: 10.167.8.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: 10.167.8.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: 10.167.8.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: 10.167.8.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: 10.167.8.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: 10.167.8.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: 10.167.8.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: 10.167.8.220
+  opencontrail_router01_hostname: rtr01
+  openldap_enabled: 'False'
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.167.8.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.8
+  openstack_compute_rack01_tenant_subnet: 192.168.0
+  openstack_compute_single_address_ranges: 10.167.8.101-10.167.8.102
+  openstack_compute_deploy_address_ranges: 172.16.49.73-172.16.49.74
+  openstack_compute_tenant_address_ranges: 192.168.0.101-192.168.0.102
+  openstack_compute_backend_address_ranges: 192.168.0.101-192.168.0.102
+  openstack_compute_node01_hostname: cmp01
+  openstack_compute_node02_hostname: cmp02
+  openstack_compute_node01_address: 10.167.8.101
+  openstack_compute_node02_address: 10.167.8.102
+  openstack_compute_node01_single_address: 10.167.8.101
+  openstack_compute_node02_single_address: 10.167.8.102
+  openstack_compute_node01_deploy_address: 172.16.49.73
+  openstack_compute_node02_deploy_address: 172.16.49.74
+  openstack_control_address: 10.167.8.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.8.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.8.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.8.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.8.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.8.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.8.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.8.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_message_queue_address: 10.167.8.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.8.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.8.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.8.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: opencontrail
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_proxy_address: 10.167.8.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.8.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.8.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.8.19
+  openstack_version: queens
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+  salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+  salt_master_address: 172.16.49.66
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.66
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.8.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.220
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2423'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  openldap_domain: cookied-bm-oc40-queens.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
new file mode 100644
index 0000000..90d7a3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt-context-environment.yaml
@@ -0,0 +1,271 @@
+nodes:
+    # Virtual Control Plane nodes
+    cid01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    cid03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ntw01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ntw02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ntw03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log01.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log02.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log03.cookied-bm-oc40-queens.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+#    bmk01.cookied-bm-oc40-queens.local:
+#      reclass_storage_name: openstack_benchmark_node01
+#      roles:
+#      - openstack_benchmark
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
similarity index 62%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
index 7ebda02..3853acd 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/salt.yaml
@@ -1,45 +1,32 @@
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import DOMAIN_NAME with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # For other salt model repository parameters, see shared-salt.yaml
 
 # Name of the context file (without extension; the extension is fixed to .yaml) used to render the Environment model
-{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','bm-mcp-pike-k8s-contrail') %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-oc40-queens') %}
 # Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-k8s-contrail.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-upgrade-physical-inventory.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2410') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2411') %}
-
+{%- set CLUSTER_CONTEXT_NAME = os_env('CLUSTER_CONTEXT_NAME', 'salt-context-cookiecutter-contrail.yaml') %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
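+# Note: the VLAN defaults above are expected to stay in sync with the
+# 'control_vlan' (2422) and 'tenant_vlan' (2423) values set in
+# salt-context-cookiecutter-contrail.yaml earlier in this change.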
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
 
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN, CLUSTER_PRODUCT_MODELS='cicd infra kubernetes opencontrail stacklight2') }}
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
-- description: "Workaround for rack01 compute generator"
-  cmd: |
-    set -e;
-    # Remove rack01 key
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 - description: "Change path to internal storage for salt.control images"
   cmd: |
     set -e;
@@ -50,12 +37,13 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Delete proxy inclusion from kvm
+- description: Temporary workaround for removing cinder-volume from CTL nodes
   cmd: |
-    sed -i 's/- system.salt.control.cluster.kubernetes_proxy_cluster//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+  retry: {count: 1, delay: 5}
+  skip_fail: true
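+# The two sed calls above strip the system.cinder.volume.* class includes
+# from openstack/control.yml; 'skip_fail: true' lets the deploy continue
+# if the patterns are already absent.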
 
 - description: Temporary workaround for correct bridge name according to environment templates
   cmd: |
@@ -65,44 +53,10 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: "Excluding tenant network from cluster"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.opencontrail_compute_address '${_param:single_address}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/compute.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Use correct compute interface"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.opencontrail_compute_iface 'ens11f1.${_param:control_vlan}' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/opencontrail/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Rerun openssh after env model is generated
-  cmd: |
-    salt-call state.sls openssh
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
 
-- description: "Disable kubelet_fail_on_swap"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools add-key parameters._param.kubelet_fail_on_swap false /srv/salt/reclass/classes/system/kubernetes/common.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
 - description: Update minion information
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
@@ -151,15 +105,13 @@
   retry: {count: 20, delay: 30}
   skip_fail: false
 
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
 #########################################
 # Configure all running salt minion nodes
 #########################################
 
 - description: Hack resolv.conf on VCP nodes for internal services access
   cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.17.41.2' > /etc/resolv.conf;"
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -182,6 +134,8 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
@@ -196,14 +150,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
-
-- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
-  cmd: |
-    set -e;
-    set -x;
-    KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
-    apt-get install -y sshuttle;
-    sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
new file mode 100644
index 0000000..2ff8f3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/sl.yaml
@@ -0,0 +1,15 @@
+{% from 'cookied-bm-oc40-queens/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+{{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
+{{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
+{{  SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+{{  SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
+{{  SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
+{{  SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
+{{  SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
similarity index 95%
rename from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
index 646af7a..6c9e48f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml
@@ -1,103 +1,102 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifdown ens3
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
    - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
-   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
    # Configure Ubuntu mirrors
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;

+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;

+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;

+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;

+

+   #   - apt-get clean

+   #   - apt-get update

+

+   # Install common packages

+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc

+

+   # Install salt-minion and stop it until it is configured

+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop

+

+   ########################################################

+   # Node is ready, allow SSH access

+   #   - echo "Allow SSH access ..."

+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP

+   ########################################################

+

+  write_files:

+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg

+     content: |

+         GRUB_RECORDFAIL_TIMEOUT=30

+         GRUB_TIMEOUT=3

+         GRUB_TIMEOUT_STYLE=menu

+

+   - path: /etc/network/interfaces

+     content: |

+          auto ens3

+          iface ens3 inet dhcp

+

+   - path: /root/.ssh/config

+     owner: root:root

+     permissions: '0600'

+     content: |

+          Host *

+            ServerAliveInterval 300

+            ServerAliveCountMax 10

+            StrictHostKeyChecking no

+            UserKnownHostsFile /dev/null

diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
similarity index 78%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
index 646af7a..106c3d5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml
@@ -1,103 +1,99 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
-   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   #   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
    # Configure Ubuntu mirrors
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;

+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;

+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;

+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;

+

+   #   - apt-get clean

+   #   - eatmydata apt-get update && apt-get -y upgrade

+

+   # Install common packages

+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc

+

+   # Install salt-minion and stop it until it is configured

+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop

+

+   # Install latest kernel

+   #   - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+

+   ########################################################

+   # Node is ready, allow SSH access

+   #- echo "Allow SSH access ..."

+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP

+   #   - reboot

+   ########################################################

+

+  write_files:

+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg

+     content: |

+         GRUB_RECORDFAIL_TIMEOUT=30

+         GRUB_TIMEOUT=3

+         GRUB_TIMEOUT_STYLE=menu

+

+   - path: /etc/network/interfaces

+     content: |

+          # The loopback network interface

+          auto lo

+          iface lo inet loopback

+          auto {interface_name}

+          iface {interface_name} inet dhcp

diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
similarity index 79%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
index 646af7a..915981e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay--user-data1604.yaml
@@ -1,103 +1,95 @@
-| # All the data below will be stored as a string object
-  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
-  ssh_pwauth: True
-  users:
-   - name: root
-     sudo: ALL=(ALL) NOPASSWD:ALL
-     shell: /bin/bash
-     ssh_authorized_keys:
-     {% for key in config.underlay.ssh_keys %}
-      - ssh-rsa {{ key['public'] }}
-     {% endfor %}
-
-  disable_root: false
-  chpasswd:
-   list: |
-    root:r00tme
-   expire: False
-
-  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
-   - service sshd restart
-  output:
-    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
-  runcmd:
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
-   # Enable grub menu using updated config below
-   - update-grub
-
-   # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
-   - sudo ifup ens3
-   #- sudo route add default gw {gateway} {interface_name}
-
-   # Create swap
-   - fallocate -l 4G /swapfile
-   - chmod 600 /swapfile
-   - mkswap /swapfile
-   - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
-   - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   #   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
    # Configure Ubuntu mirrors
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
    #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-  write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
-   - path: /etc/network/interfaces
-     content: |
-          auto ens3
-          iface ens3 inet dhcp
-
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
+   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;

+   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;

+   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;

+   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;

+

+   #   - apt-get clean

+   #   - eatmydata apt-get update && apt-get -y upgrade

+

+   # Install common packages

+   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc

+

+   # Install salt-minion and stop it until it is configured

+   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop

+

+   ########################################################

+   # Node is ready, allow SSH access

+   #   - echo "Allow SSH access ..."

+   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP

+   ########################################################

+

+  write_files:

+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg

+     content: |

+         GRUB_RECORDFAIL_TIMEOUT=30

+         GRUB_TIMEOUT=3

+         GRUB_TIMEOUT_STYLE=menu

+

+   - path: /etc/network/interfaces

+     content: |

+          # The loopback network interface

+          auto lo

+          iface lo inet loopback

+          auto {interface_name}

+          iface {interface_name} inet dhcp

diff --git a/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
new file mode 100644
index 0000000..e84e22d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-oc40-queens/underlay.yaml
@@ -0,0 +1,574 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
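+# For example, to pin a released suite instead of the 'proposed' default
+# (the value below is purely illustrative):
+#   export REPOSITORY_SUITE=testing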
+
+#{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-oc40-queens') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_CMP003 = os_env('HOSTNAME_CMP003', 'cmp003.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.8.99') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+#{% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.49.121') %}
+#{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '172.16.49.122') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-oc40-queens/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-oc40-queens/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
+
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-oc4_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +62
+            l2_network_device: +61
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            #default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+            #default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+            #default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            #virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+            #virtual_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+            # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+          #ip_ranges:
+          #    dhcp: [+2, -4]
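+          # Note: '+N'/'-N' entries above are offsets relative to the pool
+          # network, so with the default 172.16.49.64/26 'gateway: +62'
+          # should resolve to 172.16.49.126, the last usable address.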
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+        params:
+          ip_reserved:
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '192.168.5.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '192.168.200.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
+
+    groups:
+
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+          private:
+            parent_iface:
+              phys_dev: !os_env CONTROL_IFACE
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
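+        # The image locations above are taken from environment variables;
+        # a minimal sketch (paths are illustrative, not built-in defaults):
+        #   export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64-disk1.img
+        #   export IMAGE_PATH_CFG01_DAY01=/home/jenkins/images/cfg01-day01.qcow2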
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that will be used by fuel-devops
+                                            # to access the Ironic API
+            # Agent URLs that are accessible from the deploying node when nodes
+            # are bootstrapped with PXE. Usually a PXE/provision network address is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
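+            # A minimal sketch of the variables consumed above (host and
+            # paths are illustrative; any HTTP location reachable from the
+            # PXE/provision network should work):
+            #   export IRONIC_URL=http://172.16.49.66:6385/
+            #   export IRONIC_AGENT_KERNEL_URL=http://172.16.49.66:8080/tinyipa.vmlinuz
+            #   export IRONIC_AGENT_RAMDISK_URL=http://172.16.49.66:8080/tinyipa.gz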
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+
+        #  - name: {{ HOSTNAME_CFG01 }}
+        #    role: salt_master
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # As with the agent URLs, this is the URL of the image that should be
+        #          # used to deploy the node. It should also be accessible from the deploying
+        #          # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # Volume with name 'iso' will be used
+        #                     # for storing the image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data_cfg01
+
+        #      interfaces:
+        #        - label: enp3s0f0  # Infra interface
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+        #        - label: enp3s0f1
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+        #      network_config:
+        #        enp3s0f0:
+        #          networks:
+        #           - infra
+        #        enp3s0f1:
+        #          networks:
+        #           - admin
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                # - label: eno1
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                # - label: eno2
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                # eno1:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+        #  - name: {{ HOSTNAME_KVM04 }}
+        #    role: salt_minion
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_KVM04  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      # cloud_init_iface_up: eno1  # see 'interfaces' below.
+        #      cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # As with the agent URLs, this is a URL to the image used to
+        #          # deploy the node. It must also be accessible from the deploying
+        #          # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # The volume named 'iso' is used
+        #                     # to store the image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data
+
+        #      interfaces:
+        #        # - label: eno1
+        #        - label: enp2s0f0
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_KVM04
+        #        # - label: eno2
+        #        - label: enp2s0f1
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_KVM04
+
+        #      network_config:
+        #        # eno1:
+        #        enp2s0f0:
+        #          networks:
+        #           - admin
+        #        bond0:
+        #          networks:
+        #           - control
+        #          aggregation: active-backup
+        #          parents:
+        #           - enp2s0f1
+
+          - name: {{ HOSTNAME_CMP001 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe
+
+              interfaces:
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: enp5s0f2
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+
+          - name: {{ HOSTNAME_CMP002 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is a URL to the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_hwe
+
+              interfaces:
+                # - label: eno1
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+                # - label: eth0
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+                # - label: eth3
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                # - label: eth2
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: eth4
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+                #   features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+
+        #  - name: {{ HOSTNAME_CMP003 }}
+        #    role: salt_minion
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_CMP003  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      # cloud_init_iface_up: eno1  # see 'interfaces' below.
+        #      cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # As with the agent URLs, this is a URL to the image used to
+        #          # deploy the node. It must also be accessible from the deploying
+        #          # node when nodes are provisioned by the agent. Usually a PXE/provision network address is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # The volume named 'iso' is used
+        #                     # to store the image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data_hwe
+
+        #      interfaces:
+        #        # - label: eno1
+        #        - label: enp2s0f1
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_CMP003
+        #        # - label: eth0
+        #        - label: enp2s0f0
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_CMP003
+
+        #      network_config:
+        #        enp2s0f0:
+        #          networks:
+        #           - admin
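
The Ironic-backed underlay above takes almost every credential, URL and MAC address from the environment via !os_env lookups. A minimal sketch of the exports such a deployment would need, with purely illustrative placeholder values (the real lab values are site-specific and are not part of this change):

export IRONIC_URL=http://10.0.175.2:6385/             # Ironic API endpoint used by fuel-devops
export IRONIC_AGENT_KERNEL_URL=http://10.0.175.2:8080/tinyipa.vmlinuz
export IRONIC_AGENT_RAMDISK_URL=http://10.0.175.2:8080/tinyipa.gz
export IRONIC_SOURCE_IMAGE_URL=http://10.0.175.2:8080/ubuntu-16.04.qcow2
export IRONIC_SOURCE_IMAGE_CHECKSUM=0123456789abcdef0123456789abcdef  # md5 of the image
export IPMI_USER=engineer
export IPMI_PASSWORD=secret
export IPMI_HOST_KVM01=172.16.162.66                  # plus one IPMI_HOST_* per node
export ETH0_MAC_ADDRESS_KVM01=0c:c4:7a:33:1f:e4       # plus the other ETH*_MAC_ADDRESS_* vars
export NODE_VOLUME_SIZE=200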
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
index d39ca10..fa2d723 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -48,9 +48,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
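
Both reclass-tools calls were repointed from infra/config.yml to infra/config/init.yml, matching the newer model layout where infra/config is a directory. A quick sanity check that the keys ended up in the new file (a sketch; CLUSTER_NAME stands in for the rendered {{ SHARED.CLUSTER_NAME }} value):

CONFIG=/srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/infra/config/init.yml
grep -n 'openstack_compute_rack01' ${CONFIG} || echo "rack01 storage node removed"
grep -n 'system.reclass.storage.system.openstack_compute_multi' ${CONFIG}  # merged class present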
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
index 8faced7..3839f93 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -45,6 +45,13 @@
    - swapon /swapfile

    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab

    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;

+
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 

   write_files:

    - path: /etc/default/grub.d/97-enable-grub-menu.cfg

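
The added user-data steps make cfg01 self-contained on first boot: they prepare the reclass nodes directory, enable and start both salt services, and gate on a test.ping. If the ping step times out, the same state can be inspected by hand on cfg01 (a sketch):

systemctl status salt-master salt-minion   # both units should be enabled and active
salt-call --local test.ping                # minion-side check that does not need the master
salt-key -L                                # the cfg01 minion key should be listed as accepted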
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index 0414b30..438696b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -119,11 +119,26 @@
   kubernetes_control_node03_address: 10.167.4.13
   kubernetes_control_node03_deploy_address: 10.167.5.13
   kubernetes_control_node03_hostname: ctl03
+  kubernetes_compute_count: 4
+  kubernetes_compute_rack01_hostname: cmp
+  kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
+  kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
+  kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
   kubernetes_enabled: 'True'
   kubernetes_externaldns_enabled: 'False'
   kubernetes_keepalived_vip_interface: br_ctl
   kubernetes_network_calico_enabled: 'True'
-  kubernetes_virtlet_enabled: 'False'
+  kubernetes_virtlet_enabled: 'True'
+  kubernetes_proxy_hostname: prx
+  kubernetes_proxy_node01_hostname: prx01
+  kubernetes_proxy_node02_hostname: prx02
+  kubernetes_proxy_address: 10.167.4.220
+  kubernetes_proxy_node01_address: 10.167.4.221
+  kubernetes_proxy_node02_address: 10.167.4.222
+  kubernetes_metallb_enabled: 'True'
+  metallb_addresses: 172.17.16.150-172.17.16.190
+  kubernetes_ingressnginx_enabled: 'True'
+  kubernetes_ingressnginx_controller_replicas: 2
   local_repositories: 'False'
   maas_deploy_address: 10.167.5.15
   maas_deploy_range_end: 10.167.5.199
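
The kubernetes_compute_* keys above replace per-node compute definitions: the model generator expands rack01 into kubernetes_compute_count nodes named from kubernetes_compute_rack01_hostname, taking consecutive addresses from each range. Assuming the generator allocates in order (which the ranges here are sized for), the result looks like this sketch:

# Expansion for count=4 and the .101-.104 ranges above
for i in $(seq 1 4); do
    printf 'cmp%03d  deploy=10.167.5.%d  single=10.167.4.%d  tenant=10.167.6.%d\n' \
        "$i" $((100 + i)) $((100 + i)) $((100 + i))
done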
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index bfff297..4abe271 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -9,6 +9,8 @@
           role: single_dhcp
         ens4:
           role: single_static_ctl
+        ens5:
+          role: single_storage_dhcp
 
     kvm01:
       reclass_storage_name: infra_kvm_node01
@@ -86,6 +88,8 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
+        ens5:
+          role: single_storage_dhcp
 
     ctl02:
       reclass_storage_name: kubernetes_control_node02
@@ -97,6 +101,8 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
+        ens5:
+          role: single_storage_dhcp
 
     ctl03:
       reclass_storage_name: kubernetes_control_node03
@@ -108,9 +114,38 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
+        ens5:
+          role: single_storage_dhcp
 
-    cmp001:
-      reclass_storage_name: kubernetes_compute_node01
+    prx01:
+      reclass_storage_name: kubernetes_proxy_node01
+      roles:
+      - kubernetes_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_storage_dhcp
+
+    prx02:
+      reclass_storage_name: kubernetes_proxy_node02
+      roles:
+      - kubernetes_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_storage_dhcp
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: kubernetes_compute_rack01
       roles:
       - kubernetes_compute
       - linux_system_codename_xenial
@@ -120,46 +155,8 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node01_address}
-
-    cmp002:
-      reclass_storage_name: kubernetes_compute_node02
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node02_address}
-
-    cmp003:
-      reclass_storage_name: kubernetes_compute_node03
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node03_address}
-
-    cmp004:
-      reclass_storage_name: kubernetes_compute_node04
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node04_address}
+        ens5:
+          role: single_storage_dhcp
 
     mon01:
       reclass_storage_name: stacklight_server_node01
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index 5799a2d..132a382 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -25,6 +25,7 @@
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02') %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03') %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
@@ -57,6 +58,8 @@
             default_{{ HOSTNAME_CID01 }}: +91
             default_{{ HOSTNAME_CID02 }}: +92
             default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
 
           ip_ranges:
             dhcp: [+90, -10]
@@ -94,7 +97,8 @@
             default_{{ HOSTNAME_MTR01 }}: +86
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_PRX01 }}: +222
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
             default_{{ HOSTNAME_KVM }}: +240
             default_{{ HOSTNAME_KVM01 }}: +241
             default_{{ HOSTNAME_KVM02 }}: +242
@@ -131,6 +135,15 @@
             default_{{ HOSTNAME_CID01 }}: +91
             default_{{ HOSTNAME_CID02 }}: +92
             default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_CMP03 }}: +103
+            default_{{ HOSTNAME_CMP04 }}: +104
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
 
           ip_ranges:
             dhcp: [+10, -10]
@@ -170,7 +183,7 @@
 
           external:
             address_pool: external-pool01
-            dhcp: false
+            dhcp: true
             forward:
               mode: nat
 
@@ -206,20 +219,26 @@
                   bus: ide
                   # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
                                                             # it will be uploaded after config drive generation
-              interfaces:
+              interfaces: &all_interfaces
                 - label: ens3
                   l2_network_device: admin
                   interface_model: *interface_model
                 - label: ens4
                   l2_network_device: private
                   interface_model: *interface_model
-              network_config:
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
                 ens3:
                   networks:
                     - admin
                 ens4:
                   networks:
                     - private
+                ens5:
+                  networks:
+                    - external
 
           - name: {{ HOSTNAME_KVM01 }}
             role: salt_minion
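
Renaming the cfg01 anchors to &all_interfaces / &all_network_config and re-pointing the node definitions below at *all_interfaces / *all_network_config is what attaches the new ens5/external interface to every controller, compute and proxy: YAML replaces an alias with the full anchored value at load time. A standalone sketch of that mechanism (assumes python3 with PyYAML installed; the data is illustrative only):

python3 - <<'EOF'
import yaml  # PyYAML expands *aliases into the anchored value while loading
doc = """
template: &all_interfaces
  - {label: ens3, l2_network_device: admin}
  - {label: ens4, l2_network_device: private}
  - {label: ens5, l2_network_device: external}
node:
  interfaces: *all_interfaces
"""
print(yaml.safe_load(doc)["node"]["interfaces"])  # prints all three interfaces
EOF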
@@ -403,9 +422,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -415,8 +431,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CTL02 }}
             role: salt_minion
@@ -432,9 +448,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -444,8 +457,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CTL03 }}
             role: salt_minion
@@ -461,9 +474,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -473,8 +483,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
@@ -499,8 +509,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
@@ -525,8 +535,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP03 }}
             role: salt_minion
@@ -551,8 +561,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP04 }}
             role: salt_minion
@@ -577,8 +587,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_MON01 }}
             role: salt_minion
@@ -813,3 +823,55 @@
 
               interfaces: *interfaces
               network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_PRX02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index f042844..b8bda7e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -119,11 +119,22 @@
   kubernetes_control_node03_address: 10.167.4.13
   kubernetes_control_node03_deploy_address: 10.167.5.13
   kubernetes_control_node03_hostname: ctl03
+  kubernetes_compute_count: 4
+  kubernetes_compute_rack01_hostname: cmp
+  kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
+  kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
+  kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
   kubernetes_enabled: 'True'
   kubernetes_externaldns_enabled: 'False'
   kubernetes_keepalived_vip_interface: br_ctl
   kubernetes_network_calico_enabled: 'True'
   kubernetes_virtlet_enabled: 'False'
+  kubernetes_proxy_hostname: prx
+  kubernetes_proxy_node01_hostname: prx01
+  kubernetes_proxy_node02_hostname: prx02
+  kubernetes_proxy_address: 10.167.4.220
+  kubernetes_proxy_node01_address: 10.167.4.221
+  kubernetes_proxy_node02_address: 10.167.4.222
   local_repositories: 'False'
   maas_deploy_address: 10.167.5.15
   maas_deploy_range_end: 10.167.5.199
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
index 8af2c0c..d13627b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -109,34 +109,31 @@
         ens4:
           role: single_ctl_calico
 
-    cmp001:
-      reclass_storage_name: kubernetes_compute_node01
+    prx01:
+      reclass_storage_name: kubernetes_proxy_node01
       roles:
-      - kubernetes_compute
+      - kubernetes_proxy
       - linux_system_codename_xenial
-      - salt_master_host
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node01_address}
+          role: single_ctl
 
-    cmp002:
-      reclass_storage_name: kubernetes_compute_node02
+    prx02:
+      reclass_storage_name: kubernetes_proxy_node02
       roles:
-      - kubernetes_compute
+      - kubernetes_proxy
       - linux_system_codename_xenial
-      - salt_master_host
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node02_address}
+          role: single_ctl
 
-    cmp003:
-      reclass_storage_name: kubernetes_compute_node03
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: kubernetes_compute_rack01
       roles:
       - kubernetes_compute
       - linux_system_codename_xenial
@@ -146,17 +143,3 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node03_address}
-
-    cmp004:
-      reclass_storage_name: kubernetes_compute_node04
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node04_address}
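
With the generator-based cmp<<count>> definition the concrete compute nodes (cmp001..cmp004, per kubernetes_compute_count in the context above) only exist after the model is generated, so ad-hoc checks have to target them by pattern instead of the removed per-node names. For example, from the salt master (a sketch):

salt 'cmp*' test.ping        # all generated compute minions should respond
salt 'cmp*' grains.get host  # hostnames derive from kubernetes_compute_rack01_hostname plus an index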
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index d9dcdaf..81a8afa 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -25,6 +25,7 @@
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
@@ -57,6 +58,8 @@
             default_{{ HOSTNAME_CID01 }}: +91
             default_{{ HOSTNAME_CID02 }}: +92
             default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
 
           ip_ranges:
             dhcp: [+90, -10]
@@ -94,7 +97,8 @@
             default_{{ HOSTNAME_MTR01 }}: +86
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
-            default_{{ HOSTNAME_PRX01 }}: +222
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
             default_{{ HOSTNAME_KVM }}: +240
             default_{{ HOSTNAME_KVM01 }}: +241
             default_{{ HOSTNAME_KVM02 }}: +242
@@ -131,6 +135,8 @@
             default_{{ HOSTNAME_CID01 }}: +91
             default_{{ HOSTNAME_CID02 }}: +92
             default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
 
           ip_ranges:
             dhcp: [+10, -10]
@@ -403,9 +409,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -432,9 +435,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -461,9 +461,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -579,3 +576,55 @@
 
               interfaces: *interfaces
               network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index 55d1c5e..7352614 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -119,10 +119,25 @@
   kubernetes_control_node03_address: 10.167.4.13
   kubernetes_control_node03_deploy_address: 10.167.5.13
   kubernetes_control_node03_hostname: ctl03
+  kubernetes_compute_count: 4
+  kubernetes_compute_rack01_hostname: cmp
+  kubernetes_compute_deploy_address_ranges: 10.167.5.101-10.167.5.104
+  kubernetes_compute_single_address_ranges: 10.167.4.101-10.167.4.104
+  kubernetes_compute_tenant_address_ranges: 10.167.6.101-10.167.6.104
   kubernetes_enabled: 'True'
   kubernetes_externaldns_enabled: 'False'
   kubernetes_keepalived_vip_interface: br_ctl
   kubernetes_network_calico_enabled: 'True'
+  kubernetes_proxy_hostname: prx
+  kubernetes_proxy_node01_hostname: prx01
+  kubernetes_proxy_node02_hostname: prx02
+  kubernetes_proxy_address: 10.167.4.220
+  kubernetes_proxy_node01_address: 10.167.4.221
+  kubernetes_proxy_node02_address: 10.167.4.222
+  kubernetes_metallb_enabled: 'True'
+  metallb_addresses: 172.17.16.150-172.17.16.190
+  kubernetes_ingressnginx_enabled: 'True'
+  kubernetes_ingressnginx_controller_replicas: 2
   local_repositories: 'False'
   maas_deploy_address: 10.167.5.15
   maas_deploy_range_end: 10.167.5.199
@@ -169,5 +184,3 @@
   kubernetes_network_genie_enabled: 'True'
   kubernetes_genie_default_plugin: 'calico'
   kubernetes_virtlet_enabled: 'True'
-  kubernetes_compute_node01_hostname: cmp001
-  kubernetes_compute_node02_hostname: cmp002
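
This context also turns on MetalLB with the 172.17.16.150-190 pool and a two-replica nginx ingress controller. After deployment, a quick check that both landed (a sketch; the namespace name assumes the stock upstream manifests):

kubectl get pods -n metallb-system                     # controller plus one speaker per node
kubectl get svc --all-namespaces | grep LoadBalancer   # external IPs should come from 172.17.16.150-190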
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
index 8af2c0c..807d07f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
@@ -9,6 +9,8 @@
           role: single_dhcp
         ens4:
           role: single_static_ctl
+        ens5:
+          role: single_storage_dhcp
 
     kvm01:
       reclass_storage_name: infra_kvm_node01
@@ -86,6 +88,8 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
+        ens5:
+          role: single_storage_dhcp
 
     ctl02:
       reclass_storage_name: kubernetes_control_node02
@@ -97,6 +101,8 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
+        ens5:
+          role: single_storage_dhcp
 
     ctl03:
       reclass_storage_name: kubernetes_control_node03
@@ -108,9 +114,38 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
+        ens5:
+          role: single_storage_dhcp
 
-    cmp001:
-      reclass_storage_name: kubernetes_compute_node01
+    prx01:
+      reclass_storage_name: kubernetes_proxy_node01
+      roles:
+      - kubernetes_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_storage_dhcp
+
+    prx02:
+      reclass_storage_name: kubernetes_proxy_node02
+      roles:
+      - kubernetes_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_storage_dhcp
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: kubernetes_compute_rack01
       roles:
       - kubernetes_compute
       - linux_system_codename_xenial
@@ -120,43 +155,5 @@
           role: single_dhcp
         ens4:
           role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node01_address}
-
-    cmp002:
-      reclass_storage_name: kubernetes_compute_node02
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node02_address}
-
-    cmp003:
-      reclass_storage_name: kubernetes_compute_node03
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node03_address}
-
-    cmp004:
-      reclass_storage_name: kubernetes_compute_node04
-      roles:
-      - kubernetes_compute
-      - linux_system_codename_xenial
-      - salt_master_host
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl_calico
-          single_address: ${_param:kubernetes_compute_node04_address}
+        ens5:
+          role: single_storage_dhcp
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
index 2fa2aaa..ee69506 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
@@ -15,6 +15,8 @@
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
 {% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
 {% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01') %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02') %}
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
@@ -47,6 +49,8 @@
             default_{{ HOSTNAME_CID01 }}: +91
             default_{{ HOSTNAME_CID02 }}: +92
             default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
 
           ip_ranges:
             dhcp: [+90, -10]
@@ -72,6 +76,8 @@
             default_{{ HOSTNAME_CMP02 }}: +102
             default_{{ HOSTNAME_CMP03 }}: +103
             default_{{ HOSTNAME_CMP04 }}: +104
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
             default_{{ HOSTNAME_KVM }}: +240
             default_{{ HOSTNAME_KVM01 }}: +241
             default_{{ HOSTNAME_KVM02 }}: +242
@@ -108,6 +114,15 @@
             default_{{ HOSTNAME_CID01 }}: +91
             default_{{ HOSTNAME_CID02 }}: +92
             default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_CMP03 }}: +103
+            default_{{ HOSTNAME_CMP04 }}: +104
+            default_{{ HOSTNAME_PRX01 }}: +221
+            default_{{ HOSTNAME_PRX02 }}: +222
 
           ip_ranges:
             dhcp: [+10, -10]
@@ -147,7 +162,7 @@
 
           external:
             address_pool: external-pool01
-            dhcp: false
+            dhcp: true
             forward:
               mode: nat
 
@@ -183,20 +198,26 @@
                   bus: ide
                   # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
                                                             # it will be uploaded after config drive generation
-              interfaces:
+              interfaces: &all_interfaces
                 - label: ens3
                   l2_network_device: admin
                   interface_model: *interface_model
                 - label: ens4
                   l2_network_device: private
                   interface_model: *interface_model
-              network_config:
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
                 ens3:
                   networks:
                     - admin
                 ens4:
                   networks:
                     - private
+                ens5:
+                  networks:
+                    - external
 
           - name: {{ HOSTNAME_KVM01 }}
             role: salt_minion
@@ -380,9 +401,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -392,8 +410,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CTL02 }}
             role: salt_minion
@@ -409,9 +427,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -421,8 +436,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CTL03 }}
             role: salt_minion
@@ -438,9 +453,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # The volume named 'iso' is used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -450,8 +462,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
@@ -476,8 +488,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
@@ -502,8 +514,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP03 }}
             role: salt_minion
@@ -528,8 +540,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
 
           - name: {{ HOSTNAME_CMP04 }}
             role: salt_minion
@@ -554,5 +566,57 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_PRX02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
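
Editor's note: the `*all_interfaces` / `*all_network_config` aliases consumed above are defined earlier in this underlay template, outside this hunk. A minimal sketch of what they are expected to expand to, matching the inline definition used for the prx01 node in the underlay hunks below (admin/private/external on ens3/ens4/ens5 is an assumption for any node whose anchors are not shown):

```
# Sketch only; the authoritative alias definitions live at the top of underlay.yaml.
- &all_interfaces
  - label: ens3
    l2_network_device: admin
    interface_model: *interface_model
  - label: ens4
    l2_network_device: private
    interface_model: *interface_model
  - label: ens5
    l2_network_device: external
    interface_model: *interface_model
- &all_network_config
  ens3:
    networks:
      - admin
  ens4:
    networks:
      - private
  ens5:
    networks:
      - external
```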
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
index fdef434..53c9687 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -95,12 +95,11 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
   openstack_control_address: 10.167.4.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.101
@@ -142,10 +141,11 @@
   openstack_nfv_sriov_enabled: 'False'
   openstack_nova_compute_hugepages_count: '2048'
   openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_cpu_pinning: '3'
+  openstack_nova_cpu_pinning: '4,5,8,9,10,11'
   openstack_ovs_dvr_enabled: 'False'
   openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_address: 172.17.16.80  # external network endpoint
+  openstack_proxy_vip_interface: ens5
   openstack_proxy_hostname: prx
   openstack_proxy_node01_address: 10.167.4.121
   openstack_proxy_node01_hostname: prx01
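
Editor's note: the two new proxy keys shift the public endpoint off the control network — `openstack_proxy_address` becomes a VIP on the external 172.17.16.0/24 network, bound to the interface named by `openstack_proxy_vip_interface`. How this maps onto the node definition (addresses copied from the environment-context hunk below):

```
# prx01 wiring implied by the keys above (see environment-context.yaml):
prx01:
  interfaces:
    ens5:                                # openstack_proxy_vip_interface
      role: single_external
      external_address: 172.17.16.121    # the node's own external address
      external_network_netmask: 255.255.255.0
# 172.17.16.80 (openstack_proxy_address) is the shared VIP on the same network.
```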
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
index 53f5dd0..8e0ecb6 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
@@ -80,6 +80,7 @@
       reclass_storage_name: openstack_control_node01
       roles:
       - openstack_control_leader
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -91,6 +92,7 @@
       reclass_storage_name: openstack_control_node02
       roles:
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -102,6 +104,7 @@
       reclass_storage_name: openstack_control_node03
       roles:
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -178,20 +181,24 @@
     prx01:
       reclass_storage_name: openstack_proxy_node01
       roles:
-      - openstack_proxy
+      #- openstack_proxy  # disabled: another VIP interface is used
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
           role: single_ctl
+        ens5:
+          role: single_external
+          external_address: 172.17.16.121
+          external_network_netmask: 255.255.255.0
 
     # Generator-based computes. For compatibility only
     cmp<<count>>:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -218,7 +225,10 @@
         ens4:
           role: single_ctl
         ens5:
-          role: single_ovs_br_prv
-          mtu: 1500
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
+        ens6:
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
         ens7:
-          role: bond1_ab_ovs_floating
+          role: single_ovs_br_floating
+          external_address: 10.90.0.110
+          external_network_netmask: 255.255.255.0
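
Editor's note: this hunk rewires the compute tenant networking from a single untagged OVS bridge to a two-member bond feeding the VXLAN mesh, with the floating bridge moved to its own NIC. The assumed layout below is a sketch: the role names are expected to resolve to the usual MCP bond/bridge definitions, and `ab` is read as the `active-backup` bond mode used elsewhere in these contexts.

```
# Assumed compute NIC layout after this change (sketch, not verified):
ens5: {bond: bond0, mode: active-backup, carries: ovs-vxlan-tenant-mesh, tag: none}
ens6: {bond: bond0, mode: active-backup, carries: ovs-vxlan-tenant-mesh, tag: none}
ens7: {bridge: br-floating, address: 10.90.0.110/24}
```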
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
index 4cb437b..ae2e235 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
@@ -18,21 +18,13 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: "Workaround for PROD-18834: Pre-install linux-headers package"
-  cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
   cmd: |
-    set -ex;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
-    salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
-    salt 'cmp*' cmd.run "service openvswitch-switch stop";
-    salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
-    salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
-    salt 'cmp*' cmd.run "service openvswitch-switch start";
+    set -ex
+    git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+    cd /root/salt-formula-linux
+    git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+    cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
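
Editor's note: the step above swaps the packaged OVS workarounds for a patched `linux` formula checked out from Gerrit and copied into the Salt file roots. A sanity check could be added in the same step format (hypothetical step, not part of this change; the `cp.list_states` call assumes the file roots are readable locally on cfg01):

```
- description: "Hypothetical check: patched linux formula is in place"
  cmd: |
    set -ex
    test -d /root/salt-formula-linux/.git
    test -d /srv/salt/env/prd/linux
    # list the SLS files Salt can render, filtering for the linux formula
    salt-call --local --out=txt cp.list_states | grep linux || true
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
```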
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
index 71c1914..c114631 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
@@ -505,9 +505,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -517,9 +514,26 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604_swp
 
-              interfaces: *interfaces
-              network_config: *network_config
-
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - external
 
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
index 7acd05a..b0c69e8 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -95,12 +95,11 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
   openstack_control_address: 10.167.4.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.101
@@ -143,7 +142,8 @@
   openstack_nova_compute_nfv_req_enabled: 'False'
   openstack_ovs_dvr_enabled: 'True'
   openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_address: 172.17.16.80  # external network endpoint
+  openstack_proxy_vip_interface: ens5
   openstack_proxy_hostname: prx
   openstack_proxy_node01_address: 10.167.4.121
   openstack_proxy_node01_hostname: prx01
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
index 5ed6d36..4025792 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
@@ -84,6 +84,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -97,6 +98,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -110,6 +112,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -120,13 +123,17 @@
     prx01:
       reclass_storage_name: openstack_proxy_node01
       roles:
-      - openstack_proxy
+      #- openstack_proxy  # disabled: another VIP interface is used
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
           role: single_ctl
+        ens5:
+          role: single_external
+          external_address: 172.17.16.121
+          external_network_netmask: 255.255.255.0
 
     mon01:
       reclass_storage_name: stacklight_server_node01
@@ -232,6 +239,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -246,7 +254,6 @@
     gtw01:
       reclass_storage_name: openstack_gateway_node01
       roles:
-      - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
index a38f2f3..12e013c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
@@ -9,6 +9,17 @@
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
+  cmd: |
+    set -ex
+    git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+    cd /root/salt-formula-linux
+    git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+    cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
index 25af0b6..2cbbce6 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -264,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -305,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -334,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -597,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -609,9 +597,26 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604_swp
 
-              interfaces: *interfaces
-              network_config: *network_config
-
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - external
 
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
@@ -627,6 +632,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -678,6 +686,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
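
Editor's note: taken together, the underlay hunks in this file move the 50 GB `cinder` volume from the control-plane nodes onto the computes, matching the role split in the environment context — controllers gain `features_lvm_backend_control`, while computes gain `features_lvm_backend_volume_vdb` and presumably host the LVM volume group on their second disk (exposed to the guest as /dev/vdb; inferred from the role name). The resulting compute volume list, as added above:

```
volumes:
  - name: system
    capacity: !os_env NODE_VOLUME_SIZE, 150
    backing_store: cloudimage1604
    format: qcow2
  - name: cinder   # second disk; assumed to appear in the guest as /dev/vdb,
    capacity: 50   # per the features_lvm_backend_volume_vdb role name
    format: qcow2
```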
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index f9d1f15..a5a862b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -95,12 +95,11 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
   openstack_control_address: 10.167.4.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.101
@@ -143,7 +142,8 @@
   openstack_nova_compute_nfv_req_enabled: 'False'
   openstack_ovs_dvr_enabled: 'False'
   openstack_ovs_encapsulation_type: vxlan
-  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_address: 172.17.16.80  # external network endpoint
+  openstack_proxy_vip_interface: ens5
   openstack_proxy_hostname: prx
   openstack_proxy_node01_address: 10.167.4.121
   openstack_proxy_node01_hostname: prx01
@@ -190,6 +190,35 @@
       7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
       -----END RSA PRIVATE KEY-----
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  octavia_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+    -----END RSA PRIVATE KEY-----
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
   salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
   salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
   salt_master_address: 10.167.4.15
@@ -232,3 +261,9 @@
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
+  openstack_octavia_enabled: 'True'
+  octavia_hm_bind_ip: 192.168.1.12
+  octavia_lb_mgmt_cidr: 192.168.1.0/24
+  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
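
Editor's note: the new `octavia_*` keys enable Octavia with a dedicated load-balancer management network (192.168.1.0/24, health manager bound to .12, amphora pool .2-.200) and a baked-in amphora SSH keypair. If a fresh keypair is preferred over the one committed here, something like the following could generate it (hypothetical step, not part of this change):

```
- description: "Hypothetical: generate a fresh Octavia amphora keypair"
  cmd: |
    set -ex
    ssh-keygen -t rsa -b 2048 -N '' -f /root/octavia_key
    # paste /root/octavia_key into octavia_private_key and
    # /root/octavia_key.pub into octavia_public_key
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
```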
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
index 1791477..28a1115 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
@@ -82,6 +82,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -95,6 +96,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -108,6 +110,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -118,13 +121,17 @@
     prx01:
       reclass_storage_name: openstack_proxy_node01
       roles:
-      - openstack_proxy
+      #- openstack_proxy  # disabled: another VIP interface is used
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
           role: single_ctl
+        ens5:
+          role: single_external
+          external_address: 172.17.16.121
+          external_network_netmask: 255.255.255.0
 
     mon01:
       reclass_storage_name: stacklight_server_node01
@@ -230,6 +237,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -244,7 +252,6 @@
     gtw01:
       reclass_storage_name: openstack_gateway_node01
       roles:
-      - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
index 4905e32..33440ad 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
@@ -9,6 +9,17 @@
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
+  cmd: |
+    set -ex
+    git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+    cd /root/salt-formula-linux
+    git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+    cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
index 78ef271..a7b966c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
@@ -265,9 +265,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -306,9 +303,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -335,9 +329,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -598,9 +589,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -610,8 +598,26 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604_swp
 
-              interfaces: *interfaces
-              network_config: *network_config
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - external
 
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
@@ -627,6 +633,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -678,6 +687,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
new file mode 100644
index 0000000..855363b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/cookiecutter-context-queens-dvr-sl.yaml
@@ -0,0 +1,269 @@
+default_context:
+  auditd_enabled: 'False'
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+  cluster_domain: cookied-cicd-queens-dvr-sl.local
+  cluster_name: cookied-cicd-queens-dvr-sl
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.241
+  infra_kvm01_deploy_address: 10.167.5.91
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.242
+  infra_kvm02_deploy_address: 10.167.5.92
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.243
+  infra_kvm03_deploy_address: 10.167.5.93
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_range_end: 10.167.5.199
+  maas_deploy_range_start: 10.167.5.180
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy-fabric0
+  maas_hostname: cfg01
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: cicd
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
+  openstack_control_address: 10.167.4.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.4.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.4.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.4.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.100
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.167.4.101
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.167.4.102
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.167.4.103
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.4.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node02_address: 10.167.4.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node03_address: 10.167.4.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.6.8
+  openstack_message_queue_address: 10.167.4.100
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.167.4.101
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.167.4.102
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.167.4.103
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.4.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.4.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.4.19
+  openstack_version: queens
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_pushkin_email_sender_password: password
+  oss_pushkin_smtp_port: '587'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: 'proposed'
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.4.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.4.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.4.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.4.63
+  stacklight_log_node03_hostname: log03
+  stacklight_monitor_address: 10.167.4.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.4.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.4.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.4.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.4.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.4.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.4.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.4.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  stacklight_long_term_storage_type: prometheus
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
+  octavia_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+    -----END RSA PRIVATE KEY-----
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+  openstack_octavia_enabled: 'True'
+  octavia_hm_bind_ip: 192.168.1.12
+  octavia_lb_mgmt_cidr: 192.168.1.0/24
+  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
+
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
new file mode 100644
index 0000000..c3efdde
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/environment_context.yaml
@@ -0,0 +1,262 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      classes:
+      - environment.cookied-cicd-queens-dvr-sl.override_ntp_virtual
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon01:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log01:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log02:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log03:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr01:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr02:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr03:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - features_lvm_backend_volume_vdb
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
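
Editor's note: as in the pike contexts above, gtw01 in this new environment context carries no `openstack_gateway` role, presumably because DVR (enabled in this cluster) moves routing onto the computes. Both cmp and gtw nodes share the same four-NIC convention:

```
# NIC convention shared by cmp/gtw nodes in this context (copied from above):
ens3: single_dhcp               # deploy/admin network, DHCP-addressed
ens4: single_ctl                # control network 10.167.4.0/24
ens5: bond0_ab_ovs_vxlan_mesh   # tenant VXLAN mesh (active-backup bond)
ens6: bond1_ab_ovs_floating     # floating/external traffic
```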
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
new file mode 100644
index 0000000..62a8a23
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/salt.yaml
@@ -0,0 +1,25 @@
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-queens-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+- description: "Workaround to avoid reboot cmp nodes: apply patch to bring OVS interfaces UP (PROD-24343)"
+  cmd: |
+    set -ex
+    git clone https://gerrit.mcp.mirantis.com/salt-formulas/linux /root/salt-formula-linux
+    cd /root/salt-formula-linux
+    git fetch https://gerrit.mcp.mirantis.com/salt-formulas/linux refs/changes/32/29432/11 && git checkout FETCH_HEAD
+    cp -r /root/salt-formula-linux/linux/ /srv/salt/env/prd/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..4c43578
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,101 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block SSH access while the node is being prepared
+   - cloud-init-per once sudo touch /is_cloud_init_started
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - echo "******** MOUNT CONFIG DRIVE"
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   #- sudo ifdown ens3
+   #- sudo ip r d default || true  # remove existing default route to get it from dhcp
+   #- sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run the user-data script from the config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+   - rm -f /etc/network/interfaces
+   #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+   #- cp /root/config-drive/user-data /root/user-data
+   #- sed -i '/^reboot$/d' /root/user-data
+   #- set -x; cd /root && /bin/bash -xe ./user-data
+   - |
+     set -x
+     cd /root/config-drive
+     if /bin/bash -xe ./user-data; then
+         touch /is_cloud_init_finished
+     else
+         set +x
+         echo "bootstrap script /root/config-drive/user-data failed\n" > /is_cloud_init_failed
+     fi
+
+   # Enable root access (after reboot)
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   #- path: /etc/network/interfaces
+   - path: /root/interfaces
+     content: |
+          auto lo
+          iface lo inet loopback
+
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 60
+            ServerAliveCountMax 0
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
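
Editor's note: this cfg01 user-data blocks SSH with a marker file, mounts the config drive, tears down the DHCP-managed NICs, runs the drive's `user-data` script, and records the outcome in `/is_cloud_init_finished` or `/is_cloud_init_failed`. A host-side harness could poll those markers along these lines (hypothetical sketch in the same shell-in-YAML style; `CFG01_IP` is a placeholder for the node's admin address):

```
- description: "Hypothetical: wait for cfg01 bootstrap markers"
  cmd: |
    # CFG01_IP stands in for the node's admin address
    for i in $(seq 1 120); do
      ssh root@${CFG01_IP} test -f /is_cloud_init_failed && exit 1
      ssh root@${CFG01_IP} test -f /is_cloud_init_finished && exit 0
      sleep 15
    done
    exit 1
```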
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
similarity index 71%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
index a73ca23..319c007 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
@@ -25,27 +25,25 @@
     all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
 
   runcmd:
+   - export TERM=linux
+   - export LANG=C
    # Configure dhclient
    - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
    - sudo resolvconf -u
 
+   # Enable grub menu using updated config below
+   - update-grub
+
    # Prepare network connection
-   - sudo ifdown ens3
-   - sudo ip r d default || true  # remove existing default route to get it from dhcp
    - sudo ifup ens3
    #- sudo route add default gw {gateway} {interface_name}
 
    # Create swap
-   - fallocate -l 4G /swapfile
+   - fallocate -l 16G /swapfile
    - chmod 600 /swapfile
    - mkswap /swapfile
    - swapon /swapfile
-   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
-   # Enable grub menu using updated config below
-   - update-grub
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
@@ -59,12 +57,3 @@
           auto ens3
           iface ens3 inet dhcp
 
-   - path: /root/.ssh/config
-     owner: root:root
-     permissions: '0600'
-     content: |
-          Host *
-            ServerAliveInterval 300
-            ServerAliveCountMax 10
-            StrictHostKeyChecking no
-            UserKnownHostsFile /dev/null
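Note that placeholders such as `{gateway}` and `{interface_name}` in these user-data files are not Jinja expressions; they are substituted later, when the template is rendered for a concrete environment. A rough illustration, assuming Python `str.format`-style substitution (the actual rendering is performed by the devops tooling):
```
# Illustrative only: how a '{gateway}' placeholder in a user-data line is
# filled in at render time, assuming str.format-style semantics.
user_data_line = 'echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base'
print(user_data_line.format(gateway="10.70.0.1"))
# echo "nameserver 10.70.0.1" >> /etc/resolvconf/resolv.conf.d/base
```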
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
new file mode 100644
index 0000000..baa714d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
@@ -0,0 +1,867 @@
+# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-queens-dvr-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-cicd-queens-dvr-sl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+180, +220]
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+                                                            # it will be uploaded after config drive generation
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
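The `ip_reserved` and `ip_ranges` values in the underlay above are offsets relative to each pool's network, not literal addresses. A sketch of how they might resolve, assuming devops-style semantics where non-negative offsets count from the network address and negative ones from the broadcast address:
```
# Sketch of resolving the relative offsets above against one /24 from
# private-pool01; semantics are an assumption modeled on fuel-devops.
import ipaddress

net = ipaddress.ip_network("10.60.0.0/24")

def resolve(offset: int) -> ipaddress.IPv4Address:
    base = net.network_address if offset >= 0 else net.broadcast_address
    return base + offset

print(resolve(15))                 # default_cfg01 -> 10.60.0.15
print(resolve(241))                # default_kvm01 -> 10.60.0.241
print(resolve(90), resolve(-10))   # dhcp: [+90, -10] -> 10.60.0.90 .. 10.60.0.245
```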
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
similarity index 83%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
index 84c6c06..16b73bd 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml
@@ -1,9 +1,44 @@
 default_context:
   bmk_enabled: 'False'
-  ceph_enabled: 'False'
+  designate_backend: bind
+  ceph_cluster_network: 172.16.10.0/24
+  ceph_enabled: 'True'
+  ceph_hyper_converged: 'False'
+  ceph_mon_node01_address: 172.16.10.66
+  ceph_mon_node01_hostname: cmn01
+  ceph_mon_node02_address: 172.16.10.67
+  ceph_mon_node02_hostname: cmn02
+  ceph_mon_node03_address: 172.16.10.68
+  ceph_mon_node03_hostname: cmn03
+  ceph_osd_backend: bluestore
+  ceph_osd_block_db_size: '10'
+  ceph_osd_bond_mode: active-backup
+  ceph_osd_count: '2'
+  ceph_osd_data_disks: /dev/vdb
+  ceph_osd_journal_or_block_db_disks: /dev/vdc
+  ceph_osd_node_count: '2'
+  ceph_osd_journal_size: '10'
+  ceph_osd_primary_first_nic: eth1
+  ceph_osd_primary_second_nic: eth2
+  ceph_osd_rack01_backend_subnet: 172.16.10
+  ceph_osd_rack01_hostname: osd
+  ceph_osd_rack01_single_subnet: 172.16.10
+  ceph_osd_single_address_ranges: 172.16.10.94-172.16.10.95
+  ceph_osd_deploy_address_ranges: 172.16.11.94-172.16.11.95
+  ceph_osd_backend_address_ranges: 172.16.10.94-172.16.10.95
+  ceph_public_network: 172.16.10.0/24
+  ceph_rgw_address: 172.16.10.75
+  ceph_rgw_hostname: rgw
+  ceph_rgw_node01_address: 172.16.10.76
+  ceph_rgw_node01_hostname: rgw01
+  ceph_rgw_node02_address: 172.16.10.77
+  ceph_rgw_node02_hostname: rgw02
+  ceph_rgw_node03_address: 172.16.10.78
+  ceph_rgw_node03_hostname: rgw03
+  ceph_version: luminous
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-ocata-dvr.local
-  cluster_name: virtual-mcp-ocata-dvr
+  cluster_domain: cookied-mcp-mitaka-dvr-ceph.local
+  cluster_name: cookied-mcp-mitaka-dvr-ceph
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -18,11 +53,12 @@
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
   deployment_type: physical
-  dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
   email_address: ddmitriev@mirantis.com
   gateway_primary_first_nic: eth1
   gateway_primary_second_nic: eth2
+  gnocchi_aggregation_storage: ceph
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 172.16.10.101
@@ -54,7 +90,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,13 +139,13 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
-  openstack_version: ocata
+  openstack_version: mitaka
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
-  oss_webhook_app_id: '24'
-  oss_pushkin_email_sender_password: password
-  oss_pushkin_smtp_port: '587'
-  oss_webhook_login_id: '13'
+  oss_notification_app_id: '24'
+  oss_notification_sender_password: password
+  oss_notification_smtp_port: '587'
+  oss_notification_webhook_login_id: '13'
   platform: openstack_enabled
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
@@ -185,4 +220,4 @@
   tenant_network_subnet: 10.1.0.0/24
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
-  use_default_network_scheme: 'False'
+  use_default_network_scheme: 'False'
\ No newline at end of file
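Parameters such as `ceph_osd_single_address_ranges` encode one address per OSD node as a `<start>-<end>` string. A hedged sketch of the expansion (the function is illustrative; the real parsing lives in the model-generation tooling, not in tcp-qa):
```
# Illustrative expansion of the '<start>-<end>' strings used by the
# ceph_osd_*_address_ranges parameters above.
import ipaddress

def expand_range(range_str: str) -> list:
    start_s, end_s = range_str.split("-")
    start = ipaddress.ip_address(start_s)
    end = ipaddress.ip_address(end_s)
    return [str(start + i) for i in range(int(end) - int(start) + 1)]

print(expand_range("172.16.10.94-172.16.10.95"))
# ['172.16.10.94', '172.16.10.95'] -- one per OSD node (ceph_osd_node_count: '2')
```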
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
index 3e05cf0..89bf918 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/_context-environment.yaml
@@ -21,6 +21,7 @@
       - features_designate_bind9_dns
       - features_designate_bind9
       - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -38,6 +39,7 @@
       - features_designate_bind9_database
       - features_designate_bind9_dns
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -54,6 +56,7 @@
       - openstack_message_queue
       - features_designate_bind9_database
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -117,6 +120,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -142,3 +146,80 @@
           role: bond0_ab_ovs_vxlan_mesh
         ens6:
           role: bond1_ab_ovs_floating
+
+    osd<<count>>.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_osd_rack01
+      roles:
+      - ceph_osd
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_mon_node01
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_mon_node02
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_mon_node03
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_rgw_node01
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_rgw_node02
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: ceph_rgw_node03
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
\ No newline at end of file
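The `osd<<count>>` key above is a dynamic node-name pattern rather than a literal hostname: `<<count>>` expands into numbered nodes, two in this case per `ceph_osd_count: '2'` in the cookiecutter context. An illustrative sketch of that expansion (the real one is performed by the environment-model tooling):
```
# Illustrative only: expanding a '<<count>>' node-name pattern into
# numbered hosts; not the actual reclass/cookiecutter implementation.
def expand_nodes(name_tmpl: str, count: int, start: int = 1) -> list:
    return [name_tmpl.replace("<<count>>", "%03d" % i)
            for i in range(start, start + count)]

print(expand_nodes("osd<<count>>", 2))
# ['osd001', 'osd002'] -- matching the osd001/osd002 hostnames elsewhere
```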
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
new file mode 100644
index 0000000..546cc34
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
similarity index 88%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
index 50bad03..318a992 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/openstack.yaml
@@ -1,12 +1,15 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
 
 # Deploy nginx before openstack services (PROD-22740)
 - description: Deploy nginx proxy
@@ -16,6 +19,14 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
+
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
+
 {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
@@ -148,17 +159,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
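Each entry rendered from these templates follows one step schema: `description`, `cmd`, `node_name`, `retry: {count, delay}` and `skip_fail`. A minimal runner honoring that schema might look like the sketch below (the ssh transport and the runner itself are hypothetical; tcp_tests ships its own executor):
```
# Minimal sketch of executing one deploy step with the retry/skip_fail
# semantics used by the templates above; transport is illustrative.
import subprocess
import time

def run_step(step: dict) -> None:
    retry = step.get("retry", {"count": 1, "delay": 0})
    for _ in range(retry["count"]):
        if subprocess.run(["ssh", step["node_name"], step["cmd"]]).returncode == 0:
            return
        time.sleep(retry["delay"])
    if not step.get("skip_fail", False):
        raise RuntimeError("step failed: " + step["description"])

run_step({
    "description": "create rc file on cfg",
    "cmd": "scp ctl01:/root/keystonercv3 /root",
    "node_name": "cfg01.cookied-mcp-mitaka-dvr-ceph.local",
    "retry": {"count": 1, "delay": 30},
    "skip_fail": False,
})
```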
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..ef50b6d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/salt.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr-ceph/overrides.yml') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{%- if OVERRIDES != '' %}
+{%- for param in OVERRIDES.splitlines() %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
+- description: Override cluster parameters
+  cmd: |
+    salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endfor %}
+
+- description: Refresh pillar
+  cmd: salt '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endif %}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
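The `OVERRIDES` loop above turns each `key: value` line of the environment variable into a `reclass.cluster_meta_set` call. The same parsing in plain Python, mirroring the template's `replace`/`split` logic:
```
# Mirrors the Jinja loop above: strip spaces, split once on ':', feed the
# resulting (key, value) pair to 'salt-call reclass.cluster_meta_set'.
OVERRIDES = "override_example: true"

for param in OVERRIDES.splitlines():
    key, value = param.replace(" ", "").split(":", 1)
    print(f"reclass.cluster_meta_set name={key!r} value={value!r}")
# reclass.cluster_meta_set name='override_example' value='true'
```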
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
similarity index 95%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
index ff0e77a..cb93ac9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-mitaka-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
 - description: Configure docker service
@@ -66,6 +66,13 @@
   skip_fail: false
 
 # Install slv2 infra
+# Install MongoDB for alerta
+- description: Install MongoDB
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
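The runcmd lines added here make cfg01 a self-contained master/minion pair, with the trailing `salt-call ... test.ping` acting as a readiness probe. A sketch of the same probe with retries, as a harness might run it on the node (hypothetical helper, not tcp-qa code):
```
# Hypothetical readiness probe equivalent to the last added runcmd line:
# retry 'salt-call test.ping' until the freshly started minion answers.
import subprocess
import time

def wait_minion(retries: int = 10, delay: int = 12) -> bool:
    for _ in range(retries):
        ping = subprocess.run(
            ["salt-call", "-l", "info", "--timeout=120", "test.ping"],
            capture_output=True,
        )
        if ping.returncode == 0:
            return True
        time.sleep(delay)
    return False
```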
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
similarity index 67%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
index de5427a..248d63e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr-ceph/underlay.yaml
@@ -1,36 +1,41 @@
 # Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
 
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-mitaka-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
 
----
 aliases:
  - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
  - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
  - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
  - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
 
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-mitaka-dvr-ceph') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.') %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.') %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.') %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.') %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.') %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.') %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.') %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.') %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.') %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.') %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.') %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.') %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.') %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.') %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.') %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd001.') %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd002.') %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.') %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.') %}
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-mitaka-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       private-pool01:
@@ -48,9 +53,15 @@
             default_{{ HOSTNAME_MON01 }}: +107
             default_{{ HOSTNAME_MON02 }}: +108
             default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
             dhcp: [+90, -10]
@@ -70,9 +81,15 @@
             default_{{ HOSTNAME_MON01 }}: +107
             default_{{ HOSTNAME_MON02 }}: +108
             default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
             dhcp: [+90, -10]
@@ -92,9 +109,15 @@
             default_{{ HOSTNAME_MON01 }}: +107
             default_{{ HOSTNAME_MON02 }}: +108
             default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
             dhcp: [+10, -10]
@@ -114,9 +137,15 @@
             default_{{ HOSTNAME_MON01 }}: +107
             default_{{ HOSTNAME_MON02 }}: +108
             default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
             dhcp: [+10, -10]
@@ -232,9 +261,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +299,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +325,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +429,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +456,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +510,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -528,10 +551,10 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-          - name: {{ HOSTNAME_DNS01 }}
+          - name: {{ HOSTNAME_CMN01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
+              vcpu: !os_env SLAVE_NODE_CPU, 2
               memory: !os_env SLAVE_NODE_MEMORY, 2048
               boot:
                 - hd
@@ -540,7 +563,7 @@
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
+                  backing_store: cloudimage1604
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
@@ -551,13 +574,13 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *all_interfaces
-              network_config: *all_network_config
+              interfaces: *interfaces
+              network_config: *network_config
 
-          - name: {{ HOSTNAME_DNS02 }}
+          - name: {{ HOSTNAME_CMN02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
+              vcpu: !os_env SLAVE_NODE_CPU, 2
               memory: !os_env SLAVE_NODE_MEMORY, 2048
               boot:
                 - hd
@@ -566,7 +589,7 @@
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
+                  backing_store: cloudimage1604
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
@@ -577,5 +600,173 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
-              interfaces: *all_interfaces
-              network_config: *all_network_config
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_OSD01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: ceph_osd
+                  capacity: 50
+                  format: qcow2
+                - name: ceph_journal
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_OSD02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: ceph_osd
+                  capacity: 50
+                  format: qcow2
+                - name: ceph_journal
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
\ No newline at end of file
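The address_pools hunks above rely on fuel-devops relative addressing: an entry such as default_{{ HOSTNAME_OSD01 }}: +94 resolves to the pool's network address plus the offset once the subnet is picked at environment-creation time, which is why the new ceph nodes only need unique offsets rather than literal IPs. A minimal sketch of that resolution, assuming a hypothetical 172.16.10.0/24 pool:

    import ipaddress

    # Hypothetical pool subnet; real pools are allocated by devops at
    # environment-creation time, so only the offsets live in the template.
    pool = ipaddress.ip_network("172.16.10.0/24")

    # '+N' entries resolve against the pool's network address.
    for host, offset in {"osd001": 94, "cmn01": 96, "rgw01": 76, "gtw01": 110}.items():
        print(host, "->", pool.network_address + offset)

The dhcp ranges use the same convention, with negative offsets counted back from the end of the subnet.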
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
index 8b320f4..58281a4 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
index ca8114b..65f4131 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
index e1d2cb4..8954160 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/core.yaml
@@ -2,98 +2,11 @@
 
 {% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 {{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
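Each core.yaml in this change replaces the long inlined sequence of salt steps with macros imported from shared-core.yaml, so the keepalived/RabbitMQ/Galera/haproxy/memcached logic is maintained in one place. A minimal sketch of the Jinja2 mechanism involved, with a stand-in macro body (the real step definitions live in shared-core.yaml):

    from jinja2 import Environment, DictLoader

    # Stand-in macro body; the real install steps live in shared-core.yaml.
    env = Environment(loader=DictLoader({
        "shared-core.yaml": (
            "{% macro MACRO_INSTALL_KEEPALIVED() %}"
            "- description: Install keepalived ..."
            "{% endmacro %}"
        ),
        "core.yaml": (
            "{% import 'shared-core.yaml' as SHARED_CORE with context %}\n"
            "{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}"
        ),
    }))
    print(env.get_template("core.yaml").render())

One behavioural note: the inlined version had no nginx step, so MACRO_INSTALL_NGINX() is an addition here, not just a refactor.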
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
index b3c533d..b335251 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/openstack.yaml
@@ -9,6 +9,7 @@
 {% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Install OpenStack control services
 
@@ -172,11 +173,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
@@ -189,9 +186,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
-
-- description: Set floating ip address on br-floating
-  cmd: ifconfig br-floating {{ IPV4_NET_EXTERNAL_PREFIX }}.110/24
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
index b965d0f..240f6e3 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
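Two behavioural fixes recur in every salt.yaml below as well. FORMULA_SERVICES='\*' installs all available formulas instead of a hand-maintained list, and the OVERRIDES parser now splits each 'key: value' line at the first colon only, so values that themselves contain colons (URLs, for instance) no longer break the unpack. A quick sketch of the parsing difference, using a made-up override line:

    # Made-up override line whose value contains colons (e.g. a URL).
    param = "salt_api_url: http://cfg01:6969"

    # Old behaviour: four fields, so unpacking into two names raises.
    try:
        key, value = param.replace(' ', '').split(':')
    except ValueError as exc:
        print("old parser fails:", exc)

    # New behaviour: split at the first colon only.
    key, value = param.replace(' ', '').split(':', 1)
    print(key, "->", value)   # salt_api_url -> http://cfg01:6969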
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
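The cfg01 user-data now prepares /srv/salt/reclass/nodes and brings salt-master and salt-minion up during cloud-init, finishing with salt-call test.ping as a readiness gate (the same block is added to the other templates below). A rough Python equivalent of that final check, offered as a sketch only; the template runs the plain shell command:

    import subprocess

    # salt-call test.ping exits 0 once the local minion can reach the master;
    # --timeout bounds how long the bootstrap waits for the master to come up.
    try:
        rc = subprocess.call(["salt-call", "-l", "info", "--timeout=120", "test.ping"])
        print("salt master reachable" if rc == 0 else "salt not ready yet")
    except FileNotFoundError:
        print("salt-call is not installed on this host")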
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
index 7225c6d..81afdb5 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
@@ -18,8 +18,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -232,9 +232,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +296,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +400,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +427,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +481,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
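Note the pattern in this underlay.yaml (repeated in the ovs and newton variants): the 50 GB cinder qcow2 volume is removed from the control-plane nodes and added to the two compute nodes, which pairs with the new features_lvm_backend_volume_vdb role in the environment context. Assuming virtio disks are attached in declaration order, which is how these templates appear to rely on device naming, the compute's second disk becomes /dev/vdb and backs the LVM cinder volumes:

    # Disk volumes from a compute node definition, in declaration order.
    # (The 'iso' volume is attached as an ide cdrom, so it takes no vdX slot.)
    disks = ["system", "cinder"]
    for index, name in enumerate(disks):
        print(f"/dev/vd{chr(ord('a') + index)} -> {name}")  # vda system, vdb cinder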
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
index 5f2ede6..ecc8054 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
index ca8114b..65f4131 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
index 0c03d81..6a1278e 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/core.yaml
@@ -2,99 +2,11 @@
 
 {% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 {{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
index eade071..6672997 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/openstack.yaml
@@ -7,6 +7,7 @@
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Deploy nginx before openstack services (PROD-22740)
 - description: Deploy nginx proxy
@@ -194,17 +195,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
index 3f3def3..9f3767b 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
index c02624c..4fee5c5 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
@@ -18,8 +18,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -230,9 +230,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -271,9 +268,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -300,9 +294,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -407,9 +398,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -436,6 +424,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -487,6 +478,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
index 5773e62..9cb3979 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
index 803068e..6afe16e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
index ac74382..edb5059 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/core.yaml
@@ -2,99 +2,11 @@
 
 {% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 {{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
index 590d125..df28c5a 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
@@ -9,6 +9,7 @@
 {% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Install OpenStack control services
 
@@ -172,17 +173,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
index d869571..52ec2f4 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
index df0515f..7d6147d 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
@@ -18,8 +18,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -232,9 +232,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +296,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +400,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +427,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +481,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
index 1786959..8049430 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
index 3e05cf0..7baf03e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
@@ -21,6 +21,7 @@
       - features_designate_bind9_dns
       - features_designate_bind9
       - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -38,6 +39,7 @@
       - features_designate_bind9_database
       - features_designate_bind9_dns
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -54,6 +56,7 @@
       - openstack_message_queue
       - features_designate_bind9_database
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -117,6 +120,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
index a7cd35f..4b79fcb 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/core.yaml
@@ -2,99 +2,11 @@
 
 {% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 {{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
\ No newline at end of file
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
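
The rewritten core.yaml collapses roughly a hundred lines of copy-pasted salt steps into macro calls imported from shared-core.yaml, so a fix to any deployment step now lands in one place for every lab. A minimal sketch of the import-and-call pattern rendered with jinja2 directly; the macro body below is a placeholder, not the real shared step:

```python
from jinja2 import DictLoader, Environment

templates = {
    # Stand-in for shared-core.yaml: one named macro per deployment step.
    'shared-core.yaml': (
        "{% macro MACRO_INSTALL_KEEPALIVED() %}\n"
        "- description: Install keepalived\n"
        "  cmd: salt -C 'I@keepalived:cluster' state.sls keepalived\n"
        "{% endmacro %}\n"
    ),
    # Stand-in for a lab's core.yaml: import the shared macros and call them.
    'core.yaml': (
        "{% import 'shared-core.yaml' as SHARED_CORE with context %}\n"
        "{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}\n"
    ),
}

env = Environment(loader=DictLoader(templates))
print(env.get_template('core.yaml').render())
```
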
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
index ac225a5..c10aa28 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
@@ -7,6 +7,7 @@
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Deploy nginx before openstack services (PROD-22740)
 - description: Deploy nginx proxy
@@ -194,17 +195,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
index 496da5b..89b705e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
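
The split(':', 1) change is the actual bug fix in this hunk: an override value may itself contain colons (a repository URL, for example), and the old unbounded split then produced more than two fields. A quick illustration:

```python
# An OVERRIDES line whose value contains colons of its own.
param = "salt_models_repository: https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab"

key, value = param.replace(' ', '').split(':', 1)  # split on the first colon only
print(key)    # salt_models_repository
print(value)  # https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab

try:
    key, value = param.replace(' ', '').split(':')  # the old, unbounded split
except ValueError as exc:
    print('unbounded split breaks on URLs:', exc)   # too many values to unpack
```
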
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
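
The added cloud-init commands create the reclass nodes directory, enable and start both salt services, and finish with salt-call test.ping so later provisioning steps do not race the minion start-up. A rough host-side equivalent, assuming root privileges; the retry loop and its timings are illustrative, and only the command names come from the template:

```python
import subprocess
import time

def wait_for_minion(attempts=10, delay=15):
    """Block until the local minion answers test.ping, or give up."""
    for _ in range(attempts):
        result = subprocess.run(
            ['salt-call', '-l', 'info', '--timeout=120', 'test.ping'])
        if result.returncode == 0:
            return True
        time.sleep(delay)
    return False

if __name__ == '__main__':
    for unit in ('salt-master', 'salt-minion'):
        subprocess.run(['systemctl', 'enable', unit], check=True)
        subprocess.run(['systemctl', 'start', unit], check=True)
    print('minion ready:', wait_for_minion())
```
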
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
index e84c25a..883c30f 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
@@ -18,8 +18,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -222,9 +222,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -263,9 +260,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -292,9 +286,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -399,9 +390,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -428,6 +416,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -479,6 +470,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 84c6c06..a74e3d7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -2,8 +2,9 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-ocata-dvr.local
-  cluster_name: virtual-mcp-ocata-dvr
+  designate_backend: bind
+  cluster_domain: cookied-mcp-ocata-dvr.local
+  cluster_name: cookied-mcp-ocata-dvr
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -54,7 +55,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
index 803068e..f7518bc 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/_context-environment.yaml
@@ -17,9 +17,10 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
-      - features_designate_pool_manager_database
-      - features_designate_pool_manager
-      - features_designate_pool_manager_keystone
+      - features_designate_bind9_database
+      - features_designate_bind9
+      - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -34,8 +35,9 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_pool_manager_database
-      - features_designate_pool_manager
+      - features_designate_bind9_database
+      - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -50,8 +52,9 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_pool_manager_database
-      - features_designate_pool_manager
+      - features_designate_bind9_database
+      - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -63,7 +66,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-      - features_designate_pool_manager_proxy
+      - features_designate_bind9_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -144,7 +148,7 @@
     dns01.mcp11-ovs-dpdk.local:
       reclass_storage_name: openstack_dns_node01
       roles:
-      - features_designate_pool_manager_dns
+      - features_designate_bind9_dns
       - linux_system_codename_xenial
       classes:
       - system.linux.system.repo.mcp.extra
@@ -161,7 +165,7 @@
     dns02.mcp11-ovs-dpdk.local:
       reclass_storage_name: openstack_dns_node02
       roles:
-      - features_designate_pool_manager_dns
+      - features_designate_bind9_dns
       - linux_system_codename_xenial
       classes:
       - system.linux.system.repo.mcp.extra
@@ -173,4 +177,4 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-          single_address: ${_param:openstack_dns_node02_address}
+          single_address: ${_param:openstack_dns_node02_address}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
new file mode 100644
index 0000000..fc5d4f8
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
similarity index 90%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
index e3a98f1..dc9de1c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/openstack.yaml
@@ -1,14 +1,15 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
 {% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Install OpenStack control services
 
@@ -53,9 +54,9 @@
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
 # Install the designate backend
-- description: Install powerdns
+- description: Install bind
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@powerdns:server' state.sls powerdns.server
+    -C 'I@bind:server' state.sls bind
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -172,17 +173,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/overrides-policy.yml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/overrides-policy.yml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
similarity index 69%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
index e7e32a6..9d3deb7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/salt.yaml
@@ -1,14 +1,14 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # For other salt model repository parameters, see shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-dvr/overrides.yml') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-dvr/overrides.yml') %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
index ff0e77a..405e647 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
 - description: Configure docker service
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
similarity index 89%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
similarity index 96%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
index de5427a..4893e2c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr/underlay.yaml
@@ -1,9 +1,9 @@
 # Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
 
-{% import 'virtual-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
 
 ---
 aliases:
@@ -12,14 +12,14 @@
  - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
  - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
 
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -30,7 +30,7 @@
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       private-pool01:
@@ -232,9 +232,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +296,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +400,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +427,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +481,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index 3e27c69..2a6d8f9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -2,8 +2,8 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-ocata-ovs.local
-  cluster_name: virtual-mcp-ocata-ovs
+  cluster_domain: cookied-mcp-ocata-ovs.local
+  cluster_name: cookied-mcp-ocata-ovs
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
similarity index 95%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
index 3e05cf0..7baf03e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/_context-environment.yaml
@@ -21,6 +21,7 @@
       - features_designate_bind9_dns
       - features_designate_bind9
       - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -38,6 +39,7 @@
       - features_designate_bind9_database
       - features_designate_bind9_dns
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -54,6 +56,7 @@
       - openstack_message_queue
       - features_designate_bind9_database
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -117,6 +120,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
new file mode 100644
index 0000000..6fc2af4
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/core.yaml
@@ -0,0 +1,12 @@
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
index 50bad03..4072632 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/openstack.yaml
@@ -1,12 +1,13 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
 
 # Deploy nginx before openstack services (PROD-22740)
 - description: Deploy nginx proxy
@@ -148,17 +149,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
 
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
similarity index 69%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
copy to tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
index e7e32a6..41827c7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/salt.yaml
@@ -1,14 +1,14 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # For other salt model repository parameters, see shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-dvr/overrides.yml') %}
+{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-ocata-ovs/overrides.yml') %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
@@ -16,7 +16,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
index 9ec64be..7cc598b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
similarity index 88%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
similarity index 96%
rename from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
rename to tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
index 382dba4..2d31a5a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-ovs/underlay.yaml
@@ -1,9 +1,9 @@
 # Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
 
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
 
 ---
 aliases:
@@ -12,14 +12,14 @@
  - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
  - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
 
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-ovs') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -28,7 +28,7 @@
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       private-pool01:
@@ -222,9 +222,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -263,9 +260,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -292,9 +286,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -399,9 +390,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -428,6 +416,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -479,6 +470,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index 807b66a..725ff1c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -96,7 +95,7 @@
   openstack_nfv_sriov_enabled: 'False'
   openstack_nova_compute_hugepages_count: '2048'
   openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_cpu_pinning: '3'
+  openstack_nova_cpu_pinning: '4,5,8,9,10,11'
   openstack_ovs_dvr_enabled: 'False'
   openstack_ovs_encapsulation_type: vxlan
   openstack_proxy_address: 172.16.10.80
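
openstack_nova_cpu_pinning grows from a single core ('3') to a six-core set, which is what a DPDK compute profile actually needs once host cores are reserved for the OVS-DPDK poll-mode threads; which cores to pick depends on the host topology. The value is a CPU-list spec; a simplified sketch of how such a spec expands (hyphenated ranges included, exclusion syntax like '^8' ignored):

```python
def parse_cpu_set(spec):
    """Expand a CPU-list spec such as '4,5,8-11' into a sorted list of cores."""
    cpus = set()
    for chunk in spec.split(','):
        if '-' in chunk:
            lo, hi = (int(part) for part in chunk.split('-', 1))
            cpus.update(range(lo, hi + 1))
        else:
            cpus.add(int(chunk))
    return sorted(cpus)

print(parse_cpu_set('4,5,8,9,10,11'))  # [4, 5, 8, 9, 10, 11]
print(parse_cpu_set('4,5,8-11'))       # the same set, written with a range
print(parse_cpu_set('3'))              # [3] -- the old single-core value
```
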
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index bd516f9..0cd60ba 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -15,6 +15,7 @@
       roles:
       - infra_kvm
       - openstack_control_leader
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -27,6 +28,7 @@
       roles:
       - infra_kvm
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -39,6 +41,7 @@
       roles:
       - infra_kvm
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -128,7 +131,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -155,7 +158,10 @@
         ens4:
           role: single_ctl
         ens5:
-          role: single_ovs_br_prv
-          mtu: 1500
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
+        ens6:
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
         ens7:
-          role: bond1_ab_ovs_floating
+          role: single_ovs_br_floating
+          external_address: 10.90.0.110
+          external_network_netmask: 255.255.255.0
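
The gateway's floating bridge now carries an explicit external address and netmask instead of the old bond role. A small standard-library check that the two values from the hunk above describe a consistent /24:

```python
import ipaddress

# ip_interface accepts an address with a dotted netmask after the slash.
iface = ipaddress.ip_interface('10.90.0.110/255.255.255.0')
print(iface.network)              # 10.90.0.0/24
print(iface.ip in iface.network)  # True
```
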
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
index 8057165..5716d76 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/core.yaml
@@ -1,118 +1,20 @@
 {% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-backup-restore.yaml' as BACKUP with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
index a9f0722..1dba85e 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
@@ -489,9 +489,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 24bf3bd..22e4442 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -55,6 +55,9 @@
   ceph_osd_rack01_backend_subnet: 10.167.4
   ceph_osd_rack01_hostname: osd
   ceph_osd_rack01_single_subnet: 10.167.4
+  ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
+  ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95
+  ceph_osd_backend_address_ranges: 10.167.4.94-10.167.4.95
   ceph_public_network: 10.167.4.0/24
   ceph_rgw_address: 10.167.4.75
   ceph_rgw_hostname: rgw
@@ -89,7 +92,7 @@
   gainsight_service_enabled: 'False'
   gateway_primary_first_nic: eth1
   gateway_primary_second_nic: eth2
-  gnocchi_aggregation_storage: file
+  gnocchi_aggregation_storage: ceph
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
   infra_kvm01_control_address: 10.167.4.11
@@ -133,7 +136,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 10.167.4.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.11
@@ -207,9 +209,17 @@
   tenant_network_gateway: 10.167.6.1
   tenant_network_netmask: 255.255.255.0
   tenant_network_subnet: 10.167.6.0/24
-  tenant_telemetry_enabled: 'False'
+  tenant_telemetry_enabled: 'True'
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'True'
   version: proposed
   vnf_onboarding_enabled: 'False'
+  openstack_telemetry_address: 172.16.10.83
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 172.16.10.84
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 172.16.10.85
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 172.16.10.86
+  openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
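Editorial note: three related changes land in this context: the OSD rack gets explicit per-node address ranges, Gnocchi moves its aggregation storage from local files to the Ceph backend, and tenant telemetry is switched on with a dedicated mdb01..mdb03 trio. The *_address_ranges values use an inclusive first-last notation; with the two OSDs this rack defines, the assignment is presumably positional:

    # assumed positional mapping for the two-node OSD rack:
    #   osd1 -> 10.167.4.94 (single/backend), 10.167.5.94 (deploy)
    #   osd2 -> 10.167.4.95 (single/backend), 10.167.5.95 (deploy)
    ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
    ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95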
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
index 5c6a2f8..08a3c00 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
@@ -1,118 +1,19 @@
 {% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
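Editorial note: roughly a hundred lines of inline steps collapse into shared-core.yaml macro calls in the same order (keepalived, glusterfs, rabbitmq, galera, haproxy, memcached, VIP check); MACRO_INSTALL_NGINX has no counterpart among the removed steps, so the macro-based file installs one component more than the inline one did. Note that the import uses `with context`: Jinja2 imports are context-free by default, so without it HOSTNAME_CFG01, defined by the importing template, would be undefined inside the shared macros.

    {% import 'shared-core.yaml' as SHARED_CORE with context %}
    {# without 'with context', HOSTNAME_CFG01 would be undefined inside the macro #}
    {{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}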
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
index 758a86d..8531cc3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
@@ -31,6 +31,16 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
\ No newline at end of file
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
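Editorial note: the telemetry macros slot in between Heat and Horizon in dependency order: Redis first (used by the telemetry services for coordination), then Gnocchi, Panko, Ceilometer and Aodh, matching tenant_telemetry_enabled: 'True' in the context above. MACRO_INSTALL_COMPUTE also gains CELL_MAPPING=true, which presumably has the macro register the new compute hosts in Nova's cell database. A hand-run equivalent of that mapping on Pike, for illustration (the exact command the macro issues lives in shared-openstack.yaml):

    - description: (illustrative) Map new compute hosts into the Nova cell
      cmd: salt -C 'I@nova:controller and *01*' cmd.run 'nova-manage cell_v2 discover_hosts --verbose'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true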
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
index 621d9b4..b36f8be 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
@@ -29,6 +29,9 @@
 {% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -57,6 +60,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+70, -10]
 
@@ -82,6 +88,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+70, -10]
 
@@ -107,6 +116,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -132,6 +144,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+130, +230]
 
@@ -240,9 +255,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -281,9 +293,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -310,6 +319,90 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
                 - name: cinder
                   capacity: 50
                   format: qcow2
@@ -417,10 +510,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
@@ -449,10 +542,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
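Editorial note: in the underlay the mdb01..mdb03 nodes reuse the standard salt_minion shape (2 vCPU, 8 GB, system + cinder + iso volumes), and the +84/+85/+86 offsets are repeated in every address pool so the host part matches the openstack_telemetry_node0N addresses from the context (.84/.85/.86). The OSD data disks are also renamed so each volume name states its actual role:

    # old name -> new name (same 50 GB qcow2 volumes)
    #   cinder -> ceph_osd      (OSD data disk)
    #   ceph   -> ceph_journal  (journal disk)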
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
index 74a1465..c89ec89 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
@@ -169,4 +169,37 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
\ No newline at end of file
+          role: single_ctl
+
+    mdb01.cookied-mcp-pike-dvr-ceph.local:
+      reclass_storage_name: openstack_telemetry_node01
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mdb02.cookied-mcp-pike-dvr-ceph.local:
+      reclass_storage_name: openstack_telemetry_node02
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mdb03.cookied-mcp-pike-dvr-ceph.local:
+      reclass_storage_name: openstack_telemetry_node03
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
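Editorial note: each mdb node binds its reclass_storage_name to the openstack_telemetry_nodeNN entry generated from the context and takes the openstack_telemetry role with the usual dhcp/ctl interface split. Once the model is compiled and the minions are up, the trio can be checked by hostname, sketched here as a tcp-qa step:

    - description: (illustrative) Ping the telemetry trio
      cmd: salt 'mdb*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true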
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
similarity index 75%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
index 84c6c06..0ad8daf 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml
@@ -1,9 +1,12 @@
 default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'True'
+  auditd_enabled: 'True'
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-ocata-dvr.local
-  cluster_name: virtual-mcp-ocata-dvr
+  cluster_domain: cookied-mcp-pike-dvr-ssl-barbican.local
+  cluster_name: cookied-mcp-pike-dvr-ssl-barbican
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -41,7 +44,8 @@
   local_repositories: 'False'
   maas_deploy_address: 192.168.10.90
   maas_hostname: cfg01
-  mcp_version: stable
+  maas_enabled: 'False'
+  mcp_version: proposed
   offline_deployment: 'False'
   opencontrail_enabled: 'False'
   openstack_benchmark_node01_address: 172.16.10.95
@@ -54,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,7 +107,12 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
-  openstack_version: ocata
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 172.16.10.114
+  openstack_dns_node02_hostname: dns02
+  openstack_version: pike
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
   oss_webhook_app_id: '24'
@@ -151,34 +159,33 @@
   salt_master_management_address: 192.168.10.90
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
-  stacklight_enabled: 'True'
-  stacklight_log_address: 172.16.10.70
-  stacklight_log_hostname: mon
-  stacklight_log_node01_address: 172.16.10.107
-  stacklight_log_node01_hostname: mon01
-  stacklight_log_node02_address: 172.16.10.108
-  stacklight_log_node02_hostname: mon02
-  stacklight_log_node03_address: 172.16.10.109
-  stacklight_log_node03_hostname: mon03
+  stacklight_enabled: 'False'
+  stacklight_log_address: 172.16.10.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.63
+  stacklight_log_node03_hostname: log03
   stacklight_monitor_address: 172.16.10.70
   stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_address: 172.16.10.71
   stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_address: 172.16.10.72
   stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_address: 172.16.10.73
   stacklight_monitor_node03_hostname: mon03
-  stacklight_notification_address: alerts@localhost
-  stacklight_notification_smtp_host: 127.0.0.1
-  stacklight_telemetry_address: 172.16.10.70
-  stacklight_telemetry_hostname: mon
-  stacklight_telemetry_node01_address: 172.16.10.107
-  stacklight_telemetry_node01_hostname: mon01
-  stacklight_telemetry_node02_address: 172.16.10.108
-  stacklight_telemetry_node02_hostname: mon02
-  stacklight_telemetry_node03_address: 172.16.10.109
-  stacklight_telemetry_node03_hostname: mon03
+  stacklight_telemetry_address: 172.16.10.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.88
+  stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
+  stacklight_long_term_storage_type: influxdb
   static_ips_on_deploy_network_enabled: 'False'
   tenant_network_gateway: 10.1.0.1
   tenant_network_netmask: 255.255.255.0
@@ -186,3 +193,32 @@
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: False
+  designate_backend: bind
+  designate_enabled: 'False'
+  nova_vnc_tls_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'False'
+  gnocchi_aggregation_storage: file
+  manila_enabled: 'False'
+  manila_share_backend: 'lvm'
+  manila_lvm_volume_name: 'manila-volume'
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
+  openstack_share_hostname: share
+  openstack_share_node01_hostname: share01
+  openstack_barbican_address: 172.16.10.44
+  openstack_barbican_hostname: kmn
+  openstack_barbican_node01_address: 172.16.10.45
+  openstack_barbican_node01_hostname: kmn01
+  openstack_barbican_node02_address: 172.16.10.46
+  openstack_barbican_node02_hostname: kmn02
+  openstack_barbican_node03_address: 172.16.10.47
+  openstack_barbican_node03_hostname: kmn03
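Editorial note: this context is a retargeted copy of the ocata-dvr one (similarity index 75% above): the cluster moves to Pike, StackLight is disabled, and the internal plane is hardened end to end (galera_ssl_enabled, openstack_mysql_x509_enabled, rabbitmq_ssl_enabled, openstack_rabbitmq_x509_enabled, nova_vnc_tls_enabled, openstack_internal_protocol: 'https'). The Barbican flags map onto the deploy steps in the new openstack.yaml further down in this patch; the correspondence below is assumed from the names:

    barbican_enabled: 'True'     # -> MACRO_INSTALL_BARBICAN() is rendered
    barbican_backend: dogtag     # -> MACRO_INSTALL_DOGTAG() provides the CA first
    openstack_barbican_address: 172.16.10.44         # VIP for the kmn trio
    openstack_barbican_node01_address: 172.16.10.45  # kmn01 .45 .. kmn03 .47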
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
similarity index 69%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
index 3e05cf0..f704f65 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.mcp11-ovs-dpdk.local:
+    cfg01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,17 +10,14 @@
         ens4:
           role: single_ctl
 
-    ctl01.mcp11-ovs-dpdk.local:
+    ctl01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
-      - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -28,16 +25,14 @@
         ens4:
           role: single_ctl
 
-    ctl02.mcp11-ovs-dpdk.local:
+    ctl02.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,15 +40,14 @@
         ens4:
           role: single_ctl
 
-    ctl03.mcp11-ovs-dpdk.local:
+    ctl03.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -61,50 +55,43 @@
         ens4:
           role: single_ctl
 
-    prx01.mcp11-ovs-dpdk.local:
+    kmn01.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: openstack_barbican_node01
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kmn02.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: openstack_barbican_node02
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kmn03.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: openstack_barbican_node03
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-      - features_designate_bind9_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon01.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - stacklight_telemetry_leader
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -113,10 +100,11 @@
           role: single_ctl
 
     # Generator-based computes. For compatibility only
-    cmp<<count>>.mcp11-ovs-dpdk.local:
+    cmp<<count>>.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -128,7 +116,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.mcp11-ovs-dpdk.local:
+    gtw01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
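Editorial note: on the control plane the designate bind9 feature roles give way to features_lvm_backend_control, the compute rack gains features_lvm_backend_volume_vdb, and the new kmn trio carries openstack_barbican. The lvm_backend pair presumably puts cinder volumes on an LVM volume group over each compute's /dev/vdb; after deployment that layout can be inspected directly:

    - description: (illustrative) Show the LVM layout backing cinder-volume
      cmd: salt -C 'I@cinder:volume' cmd.run 'pvs; vgs; lvs'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true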
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
new file mode 100644
index 0000000..f5a0013
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
new file mode 100644
index 0000000..627ed30
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
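Editorial note: ordering matters in this file: MACRO_INSTALL_DOGTAG() runs before MACRO_INSTALL_BARBICAN(), since Dogtag is the CA that the dogtag secret-store backend depends on, and compute installation again passes CELL_MAPPING=true. A post-deploy smoke check could exercise the secured endpoint, sketched here (assuming /root/keystonercv3, the credentials file MCP labs typically place on the controllers):

    - description: (illustrative) List Barbican secrets through the TLS endpoint
      cmd: salt -C 'I@barbican:server and *01*' cmd.run '. /root/keystonercv3; openstack secret list'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true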
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
new file mode 100644
index 0000000..df13ee9
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+  cmd: |
+    set -e;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+    . /root/venv-reclass-tools/bin/activate;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
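Editorial note: the "Temp fix" step is the only non-macro step here: it builds a throwaway virtualenv, installs reclass-tools from GitHub, and injects cluster_internal_protocol: https into the system-level cinder volume class before MACRO_CONFIGURE_RECLASS compiles the model, which this all-TLS profile needs. Whether the override landed can be verified with a plain grep:

    - description: (illustrative) Verify the https override in the system model
      cmd: grep -B1 -A1 'cluster_internal_protocol' /srv/salt/reclass/classes/system/cinder/volume/single.yml
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true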
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index a73ca23..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,16 +44,14 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
-   # Enable grub menu using updated config below
-   - update-grub
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
    - path: /etc/network/interfaces
      content: |
           auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
similarity index 86%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
index 382dba4..e1befcb 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl-barbican/underlay.yaml
@@ -1,9 +1,9 @@
 # Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
 
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
 
 ---
 aliases:
@@ -12,23 +12,23 @@
  - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
  - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
 
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl-barbican') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       private-pool01:
@@ -43,11 +43,11 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -63,11 +63,11 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -83,11 +83,11 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -103,14 +103,13 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
-            dhcp: [+10, -10]
-
+            dhcp: [+130, +220]
 
     groups:
       - name: default
@@ -150,11 +149,10 @@
 
           external:
             address_pool: external-pool01
-            dhcp: true
+            dhcp: false
             forward:
               mode: route
 
-
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
            source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -162,11 +160,8 @@
          - name: cfg01_day01_image               # Pre-configured day01 image
            source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
            format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preffered)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
            format: qcow2
 
         nodes:
@@ -222,9 +217,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -263,9 +255,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -292,9 +281,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -307,11 +293,11 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON01 }}
+          - name: {{ HOSTNAME_KMN01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -333,11 +319,11 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON02 }}
+          - name: {{ HOSTNAME_KMN02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -359,11 +345,11 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON03 }}
+          - name: {{ HOSTNAME_KMN03 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -399,9 +385,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -428,6 +411,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -479,6 +468,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -494,8 +489,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
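Editorial note: the underlay is likewise a retargeted copy of the virtual-mcp-ocata-ovs one: the mon trio becomes the kmn trio (2 vCPU / 16 GB each), the external pool stops handing out DHCP, gtw01 grows from 1 vCPU / 2 GB to 4 vCPU / 4 GB, and the computes gain extra cinder (50 GB) and manila (20 GB) disks, presumably so /dev/vdb and /dev/vdc exist for features_lvm_backend_volume_vdb and the manila_lvm_devices setting even though manila is disabled in this context. The control-plane image also becomes mandatory rather than best-effort:

    # old: fall back to the generic cloud image when MCP_IMAGE_PATH1604 is unset
    source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
    # new: MCP_IMAGE_PATH1604 must be set (assumed !os_env behaviour without a default)
    source_image: !os_env MCP_IMAGE_PATH1604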
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index 4a29768..2fdfc6b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -1,4 +1,7 @@
 default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  auditd_enabled: 'True'
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
@@ -41,16 +44,20 @@
   local_repositories: 'False'
   maas_deploy_address: 192.168.10.90
   maas_hostname: cfg01
+  maas_enabled: 'False'
   mcp_version: stable
   offline_deployment: 'False'
   opencontrail_enabled: 'False'
   openstack_benchmark_node01_address: 172.16.10.95
   openstack_benchmark_node01_hostname: bmk01
   openstack_cluster_size: compact
-  openstack_compute_count: '100'
+  openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
+  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
+  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -100,6 +107,11 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 172.16.10.114
+  openstack_dns_node02_hostname: dns02
   openstack_version: pike
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -146,38 +158,67 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
+  fluentd_enabled: 'True'
   stacklight_enabled: 'True'
-  stacklight_log_address: 172.16.10.70
-  stacklight_log_hostname: mon
-  stacklight_log_node01_address: 172.16.10.107
-  stacklight_log_node01_hostname: mon01
-  stacklight_log_node02_address: 172.16.10.108
-  stacklight_log_node02_hostname: mon02
-  stacklight_log_node03_address: 172.16.10.109
-  stacklight_log_node03_hostname: mon03
+  stacklight_log_address: 172.16.10.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.63
+  stacklight_log_node03_hostname: log03
   stacklight_monitor_address: 172.16.10.70
   stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_address: 172.16.10.71
   stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_address: 172.16.10.72
   stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_address: 172.16.10.73
   stacklight_monitor_node03_hostname: mon03
-  stacklight_notification_address: alerts@localhost
-  stacklight_notification_smtp_host: 127.0.0.1
-  stacklight_telemetry_address: 172.16.10.70
-  stacklight_telemetry_hostname: mon
-  stacklight_telemetry_node01_address: 172.16.10.107
-  stacklight_telemetry_node01_hostname: mon01
-  stacklight_telemetry_node02_address: 172.16.10.108
-  stacklight_telemetry_node02_hostname: mon02
-  stacklight_telemetry_node03_address: 172.16.10.109
-  stacklight_telemetry_node03_hostname: mon03
+  stacklight_telemetry_address: 172.16.10.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.88
+  stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
+  stacklight_long_term_storage_type: influxdb
   static_ips_on_deploy_network_enabled: 'False'
+  openstack_telemetry_address: 172.16.10.96
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 172.16.10.97
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 172.16.10.98
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 172.16.10.99
+  openstack_telemetry_node03_hostname: mdb03
   tenant_network_gateway: 10.1.0.1
   tenant_network_netmask: 255.255.255.0
   tenant_network_subnet: 10.1.0.0/24
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: False
+  designate_backend: bind
+  designate_enabled: 'False'
+  nova_vnc_tls_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'True'
+  gnocchi_aggregation_storage: file
+  manila_enabled: 'False'
+  manila_share_backend: 'lvm'
+  manila_lvm_volume_name: 'manila-volume'
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
+  openstack_share_hostname: share
+  openstack_share_node01_hostname: share01
\ No newline at end of file
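Editorial note: alongside the same TLS knobs as the barbican profile (with barbican itself left disabled here), this context splits the StackLight roles that previously all sat on the mon trio into dedicated mon/log/mtr trios and adds an mdb trio for tenant telemetry (172.16.10.97-99). The redistribution is easiest to read side by side; the authoritative role lists are in the _context-environment.yaml diff that follows:

    # before: one trio carried server + telemetry + log
    mon01: [stacklightv2_server_leader, stacklight_telemetry_leader, stacklight_log_leader_v2]
    # after: dedicated trios
    mon01: [stacklightv2_server_leader]   # 172.16.10.71-73
    log01: [stacklight_log_leader_v2]     # 172.16.10.61-63
    mtr01: [stacklight_telemetry_leader]  # 172.16.10.86-88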
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
index 327788e..83998a7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.cookied-mcp-pike-dvr-ssl.local:
+    cfg01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -8,125 +8,181 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    ctl01.cookied-mcp-pike-dvr-ssl.local:
+    ctl01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
-      - features_designate_pool_manager_database
-      - features_designate_pool_manager
-      - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    ctl02.cookied-mcp-pike-dvr-ssl.local:
+    ctl02.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_pool_manager_database
-      - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    ctl03.cookied-mcp-pike-dvr-ssl.local:
+    ctl03.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_pool_manager_database
-      - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    prx01.cookied-mcp-pike-dvr-ssl.local:
+    prx01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-      - features_designate_pool_manager_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    mon01.cookied-mcp-pike-dvr-ssl.local:
+    mon01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: stacklight_server_node01
       roles:
       - stacklightv2_server_leader
-      - stacklight_telemetry_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log01.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
       - stacklight_log_leader_v2
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    mon02.cookied-mcp-pike-dvr-ssl.local:
-      reclass_storage_name: stacklight_server_node02
+    log02.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_log_node02
       roles:
-      - stacklightv2_server
-      - stacklight_telemetry
       - stacklight_log
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
 
-    mon03.cookied-mcp-pike-dvr-ssl.local:
-      reclass_storage_name: stacklight_server_node03
+    log03.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_log_node03
       roles:
-      - stacklightv2_server
-      - stacklight_telemetry
       - stacklight_log
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
+          role: single_ctl
+
+    mtr01.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr02.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr03.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
 
     # Generator-based computes. For compatibility only
-    cmp<<count>>.cookied-mcp-pike-dvr-ssl.local:
+    cmp<<count>>.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: bond0_ab_ovs_vxlan_ctl_mesh
+          role: single_ctl
         ens5:
-          role: bond0_ab_ovs_vxlan_ctl_mesh
+          role: bond0_ab_ovs_vxlan_mesh
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.cookied-mcp-pike-dvr-ssl.local:
+    gtw01.mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
@@ -135,42 +191,41 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: bond0_ab_ovs_vxlan_ctl_mesh
+          role: single_ctl
         ens5:
-          role: bond0_ab_ovs_vxlan_ctl_mesh
+          role: bond0_ab_ovs_vxlan_mesh
         ens6:
           role: bond1_ab_ovs_floating
 
-    dns01.cookied-mcp-pike-dvr-ssl.local:
-      reclass_storage_name: openstack_dns_node01
+    mdb01.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: openstack_telemetry_node01
       roles:
-      - features_designate_pool_manager_dns
       - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.extra
-      - system.linux.system.repo.mcp.apt_mirantis.openstack
-      - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-      - system.linux.system.repo.mcp.apt_mirantis.saltstack
+      - openstack_telemetry
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
-          single_address: ${_param:openstack_dns_node01_address}
+          role: single_ctl
 
-    dns02.cookied-mcp-pike-dvr-ssl.local:
-      reclass_storage_name: openstack_dns_node02
+    mdb02.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: openstack_telemetry_node02
       roles:
-      - features_designate_pool_manager_dns
       - linux_system_codename_xenial
-      classes:
-      - system.linux.system.repo.mcp.extra
-      - system.linux.system.repo.mcp.apt_mirantis.openstack
-      - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-      - system.linux.system.repo.mcp.apt_mirantis.saltstack
+      - openstack_telemetry
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
-          role: single_vlan_ctl
-          single_address: ${_param:openstack_dns_node02_address}
+          role: single_ctl
+
+    mdb03.mcp-pike-dvr-ssl.local:
+      reclass_storage_name: openstack_telemetry_node03
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
index ded1586..f3d274a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/core.yaml
@@ -1,117 +1,19 @@
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
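
Note: the rewritten core.yaml delegates each support service to a macro from
shared-core.yaml. Judging by the inline steps removed above, a macro such as
MACRO_INSTALL_KEEPALIVED presumably wraps the same salt invocations; a minimal
sketch under that assumption (the actual shared-core.yaml is not part of this
diff):

```
{# Sketch only: shape inferred from the inline steps this change removes. #}
{% macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on the first cluster node
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived on the remaining cluster nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{%- endmacro %}
```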
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
index a779e5d..c1e32ec 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
@@ -3,291 +3,40 @@
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
 {% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
 
+{% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
 # Install OpenStack control services
 
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
-  upload:
-    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
-    local_filename: overrides-policy.yml
-    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
-  node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
-  cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
-  node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
-  cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
-    ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-- description: Nginx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
 
-# install designate
-- description: Install powerdns
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@powerdns:server' state.sls powerdns.server
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install designate
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@designate:server' state.sls designate -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
 
-# Install compute node
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
 
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
 
-- description: Re-apply (as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
 
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
 
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set gateway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description:  Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-#- description:  Allow all tcp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-#
-#- description:  Allow all icmp
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 30}
-#  skip_fail: false
-
-- description: sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install docker.io on gtw
-  cmd: salt-call cmd.run 'apt-get install docker.io -y'
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Enable forward policy
-  cmd: iptables --policy FORWARD ACCEPT
-  node_name: {{ HOSTNAME_GTW01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
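
Note: the shared OpenStack macros are now parameterized (USE_ORCHESTRATE=true,
INSTALL_VOLUME=true, CELL_MAPPING=true), which is how this template keeps the
LVM volume setup without the hand-written fdisk and crudini workarounds removed
above. A hedged sketch of how such a flag can gate an optional step inside a
Jinja macro; the real definitions live in shared-openstack.yaml, which this
diff does not touch:

```
{# Illustrative shape only: INSTALL_VOLUME gates the cinder-volume step. #}
{% macro MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) %}
- description: Install cinder on the controllers
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:controller' state.sls cinder -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- if INSTALL_VOLUME %}
- description: Install cinder-volume where the volume role is assigned
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@cinder:volume' state.sls cinder
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endif %}
{%- endmacro %}
```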
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
index 08657a2..c67a1ac 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -14,33 +14,30 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
+- description: "Temp fix"
+  cmd: |
+    set -e;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+    . /root/venv-reclass-tools/bin/activate;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Hack gtw node
-  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Hack cmp01 node
-  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Hack cmp02 node
-  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
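
Note: the temp-fix step above installs reclass-tools into a throwaway
virtualenv and pins cluster_internal_protocol to https for the cinder volume
class. A quick sanity check in the same step format could confirm the key
landed (illustrative step, not part of the patch):

```
- description: Verify the https override was written (illustrative)
  cmd: grep 'cluster_internal_protocol' /srv/salt/reclass/classes/system/cinder/volume/single.yml
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
```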
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index 4f3d9bc..07cfef8 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -1,198 +1,24 @@
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Configure docker service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
 
-- description: Install docker swarm on master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
 
-- description: Send grains to the swarm slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
 
-- description:  Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
 
-- description:  Refresh modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
 
-- description:  Rerun swarm on slaves for proper token population
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
 
-- description:  Configure slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
 
-- description:  List registered Docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
 
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 20}
-  skip_fail: false
-
-- description: Install telegraf
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Configure fluentd
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install kibana client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check influxdb
-  cmd: |
-    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Launch containers
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
 
 {{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+
 {{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
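
Note: as with core.yaml, sl.yaml now calls macros from shared-sl.yaml instead
of repeating the docker swarm and StackLight steps inline. Judging by the steps
removed above, MACRO_INSTALL_DOCKER_SWARM presumably bundles the host and swarm
states; a sketch under that assumption:

```
{# Sketch: grounded in the inline steps this change removes. #}
{% macro MACRO_INSTALL_DOCKER_SWARM() %}
- description: Configure docker service
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

- description: Install docker swarm on master node
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
{%- endmacro %}
```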
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
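
Note: the commands added above bring the salt master and minion up during the
first boot of cfg01, and the final salt-call acts as a smoke test that fails
early if the local minion cannot reach the master. Only the tail of the command
list is visible in this hunk, so the enclosing cloud-init key is an assumption;
as plain cloud-init it would read roughly:

```
#cloud-config
runcmd:                           # assumed key; the hunk shows only the list items
 - mkdir -p /srv/salt/reclass/nodes
 - systemctl enable salt-master
 - systemctl enable salt-minion
 - systemctl start salt-master
 - systemctl start salt-minion
 - salt-call -l info --timeout=120 test.ping  # smoke test: local minion reaches master
```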
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
index 4c8efd8..26456f7 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -13,18 +13,28 @@
  - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
 
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -43,11 +53,21 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -63,11 +83,21 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -83,11 +113,21 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -103,13 +143,23 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
-            dhcp: [+10, -10]
+            dhcp: [+130, +220]
 
 
     groups:
@@ -150,7 +200,7 @@
 
           external:
             address_pool: external-pool01
-            dhcp: true
+            dhcp: false
             forward:
               mode: route
 
@@ -219,9 +269,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -260,9 +307,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -289,6 +333,90 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
                 - name: cinder
                   capacity: 50
                   format: qcow2
@@ -307,8 +435,8 @@
           - name: {{ HOSTNAME_MON01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -333,8 +461,8 @@
           - name: {{ HOSTNAME_MON02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -359,8 +487,164 @@
           - name: {{ HOSTNAME_MON03 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -386,7 +670,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -396,9 +680,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -426,6 +707,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -477,6 +764,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
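
Note: in the devops address pools, a `+N` entry is an offset from the pool's
network address, so this remapping presumably keeps the static node addresses
clear of the DHCP ranges (the old `+107..+109` monitoring nodes sat inside the
`[+90, -10]` window). A sketch with an illustrative /24 network; the real
networks are defined elsewhere in the template:

```
address_pools:
  private-pool01:
    net: 172.16.10.0/24          # illustrative, not taken from this diff
    params:
      ip_reserved:
        default_mon01: +71       # -> 172.16.10.71
        default_mdb03: +99       # -> 172.16.10.99
      ip_ranges:
        dhcp: [+130, +220]       # -> 172.16.10.130 .. 172.16.10.220
```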
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index 111520b..64031ea 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,6 +103,10 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  designate_backend: bind
+  designate_enabled: 'True'
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node02_address: 172.16.10.114
   openstack_version: pike
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -144,6 +147,35 @@
       7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
       -----END RSA PRIVATE KEY-----
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  octavia_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+    -----END RSA PRIVATE KEY-----
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
   salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
   salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
   salt_master_address: 172.16.10.90
@@ -188,17 +220,16 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
-  designate_backend: bind
-  designate_enabled: 'True'
+  openstack_octavia_enabled: 'True'
+  octavia_hm_bind_ip: 192.168.1.12
+  octavia_lb_mgmt_cidr: 192.168.1.0/24
+  octavia_lb_mgmt_allocation_pool_start: 192.168.1.2
+  octavia_lb_mgmt_allocation_pool_end: 192.168.1.200
+
+
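The octavia_private_key/octavia_public_key pair above is a disposable lab credential that Octavia uses to SSH into its amphora instances; it is only meaningful inside this throwaway environment. An equivalent pair can be produced with ssh-keygen (the output filename octavia is arbitrary):

    # 2048-bit RSA keypair, no passphrase
    ssh-keygen -t rsa -b 2048 -N '' -f octavia
    # ./octavia      -> octavia_private_key
    # ./octavia.pub  -> octavia_public_key

Keys committed to a repository like this must never be reused outside disposable test labs.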
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 04c1e4c..f1ba914 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -181,7 +185,6 @@
     gtw01.mcp-pike-dvr.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
-      - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -204,28 +207,6 @@
         ens4:
           role: single_ctl
 
-    share02.mcp-pike-dvr.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-pike-dvr.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
     dns01.mcp-pike-dvr.local:
       reclass_storage_name: openstack_dns_node01
       roles:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
index a6b2cc6..a39d636 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
@@ -14,6 +14,8 @@
 
 {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
 {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
 {{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
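MACRO_INSTALL_NGINX() is inserted between the HAProxy and memcached steps of every core.yaml touched by this change. Whether the state converged can be spot-checked from cfg01; the compound target below assumes the standard nginx formula pillar, so it is an illustration rather than a guaranteed match:

    # Ping only minions that carry the nginx pillar
    salt -C 'I@nginx:server' test.ping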
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 000f4f9..59e85e3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -56,3 +56,8 @@
 {{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_API() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_MANAGER() }}
+
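The two macros split the Octavia deployment into the API service on the control plane and the manager side (typically the worker, health-manager and housekeeping services) that drives amphorae over the lb-mgmt network defined by octavia_lb_mgmt_cidr above. A quick post-deploy smoke test, assuming python-octaviaclient is installed and credentials are sourced:

    # A fresh deployment should return an empty table
    openstack loadbalancer list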
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
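The added cloud-init commands pre-create the reclass nodes directory and make both salt services start on first boot; the final salt-call acts as a gate that fails early if the minion cannot reach the master within two minutes. The same check can be repeated by hand on cfg01:

    # Confirm both services are running after boot
    systemctl status salt-master salt-minion
    # Minion-to-master round trip with a two-minute timeout
    salt-call -l info --timeout=120 test.ping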
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
index 698c854..32ec67d 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -33,8 +33,6 @@
 {% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
 template:
@@ -64,12 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -95,12 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -126,12 +120,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -157,12 +149,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -274,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -315,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -344,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -607,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -637,6 +615,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -688,6 +672,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -752,58 +742,6 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
           - name: {{ HOSTNAME_DNS01 }}
             role: salt_minion
             params:
@@ -854,4 +792,4 @@
                   cloudinit_user_data: *cloudinit_user_data_1604
 
               interfaces: *all_interfaces
-              network_config: *all_network_config
\ No newline at end of file
+              network_config: *all_network_config
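Per-host entries such as default_{{ HOSTNAME_SHARE01 }}: +204 in these underlay templates are offsets into each network's address pool rather than literal addresses; the devops underlay resolves them against the pool base, so +204 on the 172.16.10.0/24 pool (the assumed admin pool here) yields 172.16.10.204, matching openstack_share_node01_address in the cookiecutter context above. The arithmetic, for reference:

    # Offset +204 relative to an assumed 172.16.10.0/24 pool base
    python3 -c 'import ipaddress; print(ipaddress.ip_address("172.16.10.0") + 204)'
    # -> 172.16.10.204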
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index 131d153..80ca7f6 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,6 +103,10 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  designate_backend: powerdns
+  designate_enabled: 'True'
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node02_address: 172.16.10.114
   openstack_version: pike
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -188,17 +191,9 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
-  designate_backend: powerdns
-  designate_enabled: 'True'
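The OVS variant enables Designate with the powerdns backend where the DVR variant uses bind, keeping both supported DNS backends covered by these labs. Either backend is exercised through the same API; assuming python-designateclient and sourced credentials:

    # List DNS zones through the Designate API
    openstack zone list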
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 2fd6295..d57ceaf 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -225,25 +229,3 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-
-    share02.mcp-pike-ovs.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-pike-ovs.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
index 4a4dfac..06946d4 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
@@ -13,6 +13,8 @@
 
 {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
 {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
 {{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
index 95ffa33..d1c83dd 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -34,8 +34,6 @@
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -64,12 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -95,12 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -126,12 +120,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -157,12 +149,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -274,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -315,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -344,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -607,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -636,6 +614,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -687,6 +671,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -802,55 +792,3 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
\ No newline at end of file
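share02/share03 are retired wholesale: hostname declarations, address-pool offsets, environment-context nodes and the underlay VM definitions all go, leaving a single share01 backed by the dedicated /dev/vdc LVM device. A simple way to confirm no stale references survive this kind of removal:

    # Expect no matches (only "clean") once the nodes are fully retired
    grep -r share02 tcp_tests/templates/cookied-mcp-pike-ovs/ || echo clean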
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 135a2c4..bfcc3fd 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -135,7 +135,6 @@
   openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
   openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
   openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
-  openstack_compute_backend_address_ranges: 10.167.6.105-10.167.6.106
   openstack_dns_hostname: dns
   openstack_dns_node01_address: 10.167.4.111
   openstack_dns_node01_hostname: dns01
@@ -228,5 +227,13 @@
   openstack_mysql_x509_enabled: 'False'
   rabbitmq_ssl_enabled: 'False'
   openstack_rabbitmq_x509_enabled: 'False'
-  tenant_telemetry_enabled: 'False'
-  gnocchi_aggregation_storage: file
+  tenant_telemetry_enabled: 'True'
+  gnocchi_aggregation_storage: ceph
+  openstack_telemetry_address: 172.16.10.83
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 172.16.10.84
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 172.16.10.85
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 172.16.10.86
+  openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
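Tenant telemetry flips from disabled to a ceph-backed Gnocchi deployment, with a dedicated mdb01..mdb03 trio (defined below) carrying the openstack_telemetry role. Once deployed, the metric API can be probed directly, assuming gnocchiclient is available and credentials are sourced:

    # Confirms the Gnocchi API answers; empty on a fresh cloud
    gnocchi metric list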
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
index fab7c18..fcce951 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
@@ -12,6 +12,8 @@
 
 {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
 {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
 {{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
index 5d19c16..636187b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -31,6 +31,16 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
+
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
index c898837..fe31142 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
@@ -29,6 +29,9 @@
 {% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -57,6 +60,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+70, -10]
 
@@ -82,6 +88,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+70, -10]
 
@@ -107,6 +116,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -132,6 +144,9 @@
             default_{{ HOSTNAME_RGW03 }}: +78
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_MDB01 }}: +84
+            default_{{ HOSTNAME_MDB02 }}: +85
+            default_{{ HOSTNAME_MDB03 }}: +86
           ip_ranges:
             dhcp: [+130, +230]
 
@@ -240,9 +255,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -281,9 +293,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -310,9 +319,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -322,6 +328,8 @@
                   cloudinit_meta_data: *cloudinit_meta_data
                   cloudinit_user_data: *cloudinit_user_data_1604
 
+
+
               interfaces: *interfaces
               network_config: *network_config
 
@@ -351,6 +359,93 @@
               interfaces: *interfaces
               network_config: *network_config
 
+          - name: {{ HOSTNAME_MDB01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
           - name: {{ HOSTNAME_CMN02 }}
             role: salt_minion
             params:
@@ -417,10 +512,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
@@ -449,10 +544,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
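Renaming the OSD volumes from cinder/ceph to ceph_osd/ceph_journal makes the purpose of each 50 GB disk explicit: one holds OSD data, the other the journal, mirroring the classic filestore layout. Inside an osd guest the two extra virtio disks can be identified before Ceph claims them:

    # The two 50G disks after the system disk are OSD data and journal
    lsblk -o NAME,SIZE,TYPE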
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
index f279b44..28831da 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
@@ -169,4 +169,38 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
\ No newline at end of file
+          role: single_ctl
+
+
+    mdb01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_telemetry_node01
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mdb02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_telemetry_node02
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mdb03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_telemetry_node03
+      roles:
+      - linux_system_codename_xenial
+      - openstack_telemetry
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
similarity index 75%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
index 84c6c06..1cec753 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml
@@ -1,9 +1,12 @@
 default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'True'
+  auditd_enabled: 'True'
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-ocata-dvr.local
-  cluster_name: virtual-mcp-ocata-dvr
+  cluster_domain: cookied-mcp-queens-dvr-ssl-barbican.local
+  cluster_name: cookied-mcp-queens-dvr-ssl-barbican
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -41,7 +44,8 @@
   local_repositories: 'False'
   maas_deploy_address: 192.168.10.90
   maas_hostname: cfg01
-  mcp_version: stable
+  maas_enabled: 'False'
+  mcp_version: proposed
   offline_deployment: 'False'
   opencontrail_enabled: 'False'
   openstack_benchmark_node01_address: 172.16.10.95
@@ -54,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,7 +107,12 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
-  openstack_version: ocata
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 172.16.10.114
+  openstack_dns_node02_hostname: dns02
+  openstack_version: queens
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
   oss_webhook_app_id: '24'
@@ -151,34 +159,33 @@
   salt_master_management_address: 192.168.10.90
   shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
-  stacklight_enabled: 'True'
-  stacklight_log_address: 172.16.10.70
-  stacklight_log_hostname: mon
-  stacklight_log_node01_address: 172.16.10.107
-  stacklight_log_node01_hostname: mon01
-  stacklight_log_node02_address: 172.16.10.108
-  stacklight_log_node02_hostname: mon02
-  stacklight_log_node03_address: 172.16.10.109
-  stacklight_log_node03_hostname: mon03
+  stacklight_enabled: 'False'
+  stacklight_log_address: 172.16.10.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 172.16.10.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 172.16.10.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 172.16.10.63
+  stacklight_log_node03_hostname: log03
   stacklight_monitor_address: 172.16.10.70
   stacklight_monitor_hostname: mon
-  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_address: 172.16.10.71
   stacklight_monitor_node01_hostname: mon01
-  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_address: 172.16.10.72
   stacklight_monitor_node02_hostname: mon02
-  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_address: 172.16.10.73
   stacklight_monitor_node03_hostname: mon03
-  stacklight_notification_address: alerts@localhost
-  stacklight_notification_smtp_host: 127.0.0.1
-  stacklight_telemetry_address: 172.16.10.70
-  stacklight_telemetry_hostname: mon
-  stacklight_telemetry_node01_address: 172.16.10.107
-  stacklight_telemetry_node01_hostname: mon01
-  stacklight_telemetry_node02_address: 172.16.10.108
-  stacklight_telemetry_node02_hostname: mon02
-  stacklight_telemetry_node03_address: 172.16.10.109
-  stacklight_telemetry_node03_hostname: mon03
+  stacklight_telemetry_address: 172.16.10.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 172.16.10.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 172.16.10.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 172.16.10.88
+  stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
+  stacklight_long_term_storage_type: influxdb
   static_ips_on_deploy_network_enabled: 'False'
   tenant_network_gateway: 10.1.0.1
   tenant_network_netmask: 255.255.255.0
@@ -186,3 +193,32 @@
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: False
+  designate_backend: bind
+  designate_enabled: 'False'
+  nova_vnc_tls_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'False'
+  gnocchi_aggregation_storage: file
+  manila_enabled: 'False'
+  manila_share_backend: 'lvm'
+  manila_lvm_volume_name: 'manila-volume'
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
+  openstack_share_hostname: share
+  openstack_share_node01_hostname: share01
+  openstack_barbican_address: 172.16.10.44
+  openstack_barbican_hostname: kmn
+  openstack_barbican_node01_address: 172.16.10.45
+  openstack_barbican_node01_hostname: kmn01
+  openstack_barbican_node02_address: 172.16.10.46
+  openstack_barbican_node02_hostname: kmn02
+  openstack_barbican_node03_address: 172.16.10.47
+  openstack_barbican_node03_hostname: kmn03
\ No newline at end of file
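The new barbican lab context enables Dogtag as the secret-store backend and switches on TLS wherever the model supports it (Galera, RabbitMQ, VNC, internal endpoints), while StackLight, telemetry and manila stay off to keep the footprint small. A post-deploy sanity check against the Barbican API, assuming python-barbicanclient and that the lab's certificate chain is trusted (or --insecure is acceptable for a lab):

    # Exercises keystone auth, TLS endpoints and the barbican API at once
    openstack secret list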
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
similarity index 69%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
index 3e05cf0..7102e9c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.mcp11-ovs-dpdk.local:
+    cfg01.mcp-queens-dvr-ssl.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,17 +10,14 @@
         ens4:
           role: single_ctl
 
-    ctl01.mcp11-ovs-dpdk.local:
+    ctl01.mcp-queens-dvr-ssl.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
-      - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -28,16 +25,14 @@
         ens4:
           role: single_ctl
 
-    ctl02.mcp11-ovs-dpdk.local:
+    ctl02.mcp-queens-dvr-ssl.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,15 +40,14 @@
         ens4:
           role: single_ctl
 
-    ctl03.mcp11-ovs-dpdk.local:
+    ctl03.mcp-queens-dvr-ssl.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -61,50 +55,43 @@
         ens4:
           role: single_ctl
 
-    prx01.mcp11-ovs-dpdk.local:
+    kmn01.mcp-queens-dvr-ssl.local:
+      reclass_storage_name: openstack_barbican_node01
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kmn02.mcp-queens-dvr-ssl.local:
+      reclass_storage_name: openstack_barbican_node02
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kmn03.mcp-queens-dvr-ssl.local:
+      reclass_storage_name: openstack_barbican_node03
+      roles:
+      - openstack_barbican
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.mcp-queens-dvr-ssl.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-      - features_designate_bind9_proxy
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon01.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - stacklight_telemetry_leader
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -113,10 +100,11 @@
           role: single_ctl
 
     # Generator-based computes. For compatibility only
-    cmp<<count>>.mcp11-ovs-dpdk.local:
+    cmp<<count>>.mcp-queens-dvr-ssl.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -128,7 +116,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.mcp11-ovs-dpdk.local:
+    gtw01.mcp-queens-dvr-ssl.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
new file mode 100644
index 0000000..4efe25c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/core.yaml
@@ -0,0 +1,19 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
new file mode 100644
index 0000000..5a2bdac
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DOGTAG() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_BARBICAN() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
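MACRO_INSTALL_DOGTAG() runs before MACRO_INSTALL_BARBICAN(), presumably because the Dogtag CA must be reachable when the barbican states configure the dogtag plugin. INSTALL_VOLUME=true on the Cinder macro pairs with the features_lvm_backend_volume_vdb compute role above, so cinder-volume lands on the computes with their /dev/vdb disk. A targeting check from cfg01, assuming the standard reclass pillar layout:

    # Minions that will run cinder-volume in this model
    salt -C 'I@cinder:volume' test.ping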
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
new file mode 100644
index 0000000..edfadef
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/salt.yaml
@@ -0,0 +1,43 @@
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: "Temp fix"
+  cmd: |
+    set -e;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+    . /root/venv-reclass-tools/bin/activate;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
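
The "Temp fix" step above uses `reclass-tools add-key` to pin `_param.cluster_internal_protocol` to `https` in the system-level cinder class before reclass is configured. A rough Python equivalent of that single operation, shown only to make the YAML edit explicit (the real tool is the one installed from github.com/dis-xcom/reclass-tools):

```
# Rough, illustrative equivalent of:
#   reclass-tools add-key parameters._param.cluster_internal_protocol 'https' <file>
import yaml

def add_key(dotted_path, value, path):
    with open(path) as f:
        data = yaml.safe_load(f) or {}
    node = data
    keys = dotted_path.split('.')
    for key in keys[:-1]:            # walk/create the nested mappings
        node = node.setdefault(key, {})
    node[keys[-1]] = value           # set the leaf key
    with open(path, 'w') as f:
        yaml.safe_dump(data, f, default_flow_style=False)

add_key('parameters._param.cluster_internal_protocol', 'https',
        '/srv/salt/reclass/classes/system/cinder/volume/single.yml')
```
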
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--meta-data.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
similarity index 87%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index a73ca23..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,16 +44,14 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
-   # Enable grub menu using updated config below
-   - update-grub
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
-   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
-     content: |
-         GRUB_RECORDFAIL_TIMEOUT=30
-         GRUB_TIMEOUT=3
-         GRUB_TIMEOUT_STYLE=menu
-
    - path: /etc/network/interfaces
      content: |
           auto ens3
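
The cfg01 user-data (here, and in the similar hunks for the other templates below) now enables and starts salt-master/salt-minion at boot and blocks on `salt-call -l info --timeout=120 test.ping` until the local minion has registered with the master. A sketch of an equivalent readiness wait, written as a hypothetical helper to make the intent of that line explicit:

```
import subprocess
import time

def wait_for_minion(retries=12, delay=10):
    """Illustrative equivalent of 'salt-call -l info --timeout=120 test.ping':
    poll until the local minion can execute a job, i.e. it has registered
    and authenticated with the salt-master."""
    for _ in range(retries):
        result = subprocess.run(
            ["salt-call", "--timeout=10", "test.ping"],
            capture_output=True,
        )
        if result.returncode == 0:   # the minion answered the ping
            return True
        time.sleep(delay)
    return False
```
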
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
similarity index 86%
copy from tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
copy to tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
index 382dba4..c7dd479 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl-barbican/underlay.yaml
@@ -1,9 +1,9 @@
 # Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
 
-{% import 'virtual-mcp-ocata-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ssl-barbican/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
 
 ---
 aliases:
@@ -12,23 +12,23 @@
  - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
  - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
 
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-ovs') %}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ssl-barbican') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN01 = os_env('HOSTNAME_KMN01', 'kmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN02 = os_env('HOSTNAME_KMN02', 'kmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KMN03 = os_env('HOSTNAME_KMN03', 'kmn03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ssl-barbican_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       private-pool01:
@@ -43,11 +43,11 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -63,11 +63,11 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -83,11 +83,11 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_KMN01 }}: +45
+            default_{{ HOSTNAME_KMN02 }}: +46
+            default_{{ HOSTNAME_KMN03 }}: +47
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -103,14 +103,10 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
-            dhcp: [+10, -10]
-
+            dhcp: [+130, +220]
 
     groups:
       - name: default
@@ -150,11 +146,10 @@
 
           external:
             address_pool: external-pool01
-            dhcp: true
+            dhcp: false
             forward:
               mode: route
 
-
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
            source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -162,11 +157,8 @@
          - name: cfg01_day01_image               # Pre-configured day01 image
            source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
            format: qcow2
-         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes initially based on kvm nodes.
-           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preffered)
-           # or
-           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-           source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
            format: qcow2
 
         nodes:
@@ -222,9 +214,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -263,9 +252,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -292,9 +278,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -307,11 +290,11 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON01 }}
+          - name: {{ HOSTNAME_KMN01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -333,11 +316,11 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON02 }}
+          - name: {{ HOSTNAME_KMN02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -359,11 +342,11 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON03 }}
+          - name: {{ HOSTNAME_KMN03 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -399,9 +382,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -428,6 +408,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -479,6 +465,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -494,8 +486,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
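
One subtle fix in this underlay: the DOMAIN_NAME default moved from `os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local'` to `os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local')`. The old form appended `.local` even when DOMAIN_NAME was set explicitly in the environment; the new form only applies `.local` inside the fallback. A quick demonstration, assuming os_env is a plain environment lookup with a default:

```
import os

def os_env(name, default=None):
    # Assumed behaviour of the template helper: env var or default.
    return os.environ.get(name, default)

os.environ['DOMAIN_NAME'] = 'lab.example.com'
LAB = 'cookied-mcp-queens-dvr-ssl-barbican'

old = os_env('DOMAIN_NAME', LAB) + '.local'   # 'lab.example.com.local' (mangled)
new = os_env('DOMAIN_NAME', LAB + '.local')   # 'lab.example.com' (respected)
print(old, new)
```
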
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
index 35587d1..dcc8bc5 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
@@ -58,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -109,9 +108,9 @@
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
   openstack_dns_hostname: dns
-  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_address: 172.16.10.113
   openstack_dns_node01_hostname: dns01
-  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_address: 172.16.10.114
   openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
@@ -188,6 +187,14 @@
   stacklight_version: '2'
   stacklight_long_term_storage_type: influxdb
   static_ips_on_deploy_network_enabled: 'False'
+  openstack_telemetry_address: 172.16.10.96
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 172.16.10.97
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 172.16.10.98
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 172.16.10.99
+  openstack_telemetry_node03_hostname: mdb03
   tenant_network_gateway: 10.1.0.1
   tenant_network_netmask: 255.255.255.0
   tenant_network_subnet: 10.1.0.0/24
@@ -197,7 +204,7 @@
   rsync_fernet_rotation: 'True'
   compute_padding_with_zeros: False
   designate_backend: bind
-  designate_enabled: 'True'
+  designate_enabled: 'False'
   nova_vnc_tls_enabled: 'True'
   galera_ssl_enabled: 'True'
   openstack_mysql_x509_enabled: 'True'
@@ -206,18 +213,12 @@
   openstack_internal_protocol: 'https'
   tenant_telemetry_enabled: 'True'
   gnocchi_aggregation_storage: file
-  manila_enabled: 'True'
+  manila_enabled: 'False'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
-  openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
\ No newline at end of file
+  openstack_share_node01_hostname: share01
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
index 4ff3212..b1c7e3d 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -193,57 +197,35 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    share01.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_share_node01
+    mdb01.mcp-queens-dvr-ssl.local:
+      reclass_storage_name: openstack_telemetry_node01
       roles:
-      - openstack_share
       - linux_system_codename_xenial
+      - openstack_telemetry
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
           role: single_ctl
 
-    share02.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_share_node02
+    mdb02.mcp-queens-dvr-ssl.local:
+      reclass_storage_name: openstack_telemetry_node02
       roles:
-      - openstack_share
       - linux_system_codename_xenial
+      - openstack_telemetry
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
           role: single_ctl
 
-    share03.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_share_node03
+    mdb03.mcp-queens-dvr-ssl.local:
+      reclass_storage_name: openstack_telemetry_node03
       roles:
-      - openstack_share
       - linux_system_codename_xenial
+      - openstack_telemetry
       interfaces:
         ens3:
           role: single_dhcp
         ens4:
           role: single_ctl
-
-    dns01.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_dns_node01
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns02.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_dns_node02
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
\ No newline at end of file
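
In the environment context, each node maps a `reclass_storage_name` plus a role list onto its interfaces; here the share and dns nodes give way to three mdb nodes carrying `openstack_telemetry`. A small audit sketch that groups nodes by role, assuming the usual environment-context layout with a top-level `nodes:` mapping (the file path is illustrative):

```
from collections import defaultdict
import yaml

with open("_context-environment.yaml") as f:
    env = yaml.safe_load(f)

nodes_by_role = defaultdict(list)
for node, spec in env["nodes"].items():
    for role in spec.get("roles", []):
        nodes_by_role[role].append(node)

# e.g. confirm all three mdb nodes carry the telemetry role:
print(nodes_by_role.get("openstack_telemetry"))
```
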
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
index 8a48f49..e10bccc 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/core.yaml
@@ -12,6 +12,8 @@
 
 {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
 {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
 {{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
index a768afe..278b78b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
@@ -27,10 +27,16 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
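
The dvr-ssl template drops Designate and Manila and installs the tenant-telemetry stack instead. The order above is dependency-driven: Redis provides coordination, Gnocchi provides metric storage, and Panko/Ceilometer/Aodh consume them. A small self-contained sketch of validating such an ordering; note the dependency map is inferred from this install order, not taken from shared-openstack.yaml:

```
# Hypothetical dependency map inferred from the install order above.
DEPENDS_ON = {
    "gnocchi": {"redis"},                # coordination backend
    "ceilometer": {"gnocchi", "panko"},  # publishes metrics and events
    "aodh": {"gnocchi"},                 # alarms evaluate stored metrics
}

ORDER = ["redis", "gnocchi", "panko", "ceilometer", "aodh"]

def order_is_valid(order, deps):
    seen = set()
    for svc in order:
        if not deps.get(svc, set()) <= seen:  # all deps installed first?
            return False
        seen.add(svc)
    return True

assert order_is_valid(ORDER, DEPENDS_ON)
```
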
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
index 1667dd8..eaf8a1f 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/salt.yaml
@@ -14,6 +14,18 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
+- description: "Temp fix"
+  cmd: |
+    set -e;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+    . /root/venv-reclass-tools/bin/activate;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
index 8efefc3..7d65097 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/sl.yaml
@@ -5,8 +5,6 @@
 
 {{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
 
-{{  SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
 {{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
 
 {{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
index 895945a..f6a8998 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
@@ -31,9 +31,10 @@
 {% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -61,11 +62,12 @@
             default_{{ HOSTNAME_MTR01 }}: +86
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -90,11 +92,12 @@
             default_{{ HOSTNAME_MTR01 }}: +86
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -119,11 +122,12 @@
             default_{{ HOSTNAME_MTR01 }}: +86
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -148,11 +152,12 @@
             default_{{ HOSTNAME_MTR01 }}: +86
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_MDB01 }}: +97
+            default_{{ HOSTNAME_MDB02 }}: +98
+            default_{{ HOSTNAME_MDB03 }}: +99
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+130, +220]
 
@@ -264,9 +269,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -305,9 +307,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -334,6 +333,90 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MDB03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
                 - name: cinder
                   capacity: 50
                   format: qcow2
@@ -597,9 +680,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -627,6 +707,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -678,6 +764,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -715,81 +807,3 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
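
Throughout these underlays, node addresses are relative offsets into an address pool (`+97`, `+204`) and DHCP ranges are offset pairs such as `[+130, +220]`. Assuming `+N` selects the N-th address above the pool's network base, and a 172.16.10.0/24 private pool as the cookiecutter context implies, the new mdb offsets line up with the `openstack_telemetry_node*_address` values:

```
import ipaddress

# Assumption: '+N' selects the N-th address above the pool's network base.
pool = ipaddress.ip_network("172.16.10.0/24")

for name, offset in [("mdb01", 97), ("mdb02", 98), ("mdb03", 99),
                     ("share01", 204)]:
    print(name, pool.network_address + offset)
# mdb01 172.16.10.97 ... matching openstack_telemetry_node01_address, etc.
```
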
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
index a37d4e2..15f8d68 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
@@ -58,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -109,9 +108,9 @@
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
   openstack_dns_hostname: dns
-  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_address: 172.16.10.113
   openstack_dns_node01_hostname: dns01
-  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_address: 172.16.10.114
   openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
@@ -208,15 +207,9 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
\ No newline at end of file
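
`manila_lvm_devices` moves from the loopback `/dev/loop1` to the real disk `/dev/vdc`, which matches the compute-node volume layout added in the underlays: virtio disks are typically exposed in attach order, so the `system`, `cinder`, `manila` volumes become `/dev/vda`, `/dev/vdb`, `/dev/vdc`. That is consistent with the `features_lvm_backend_volume_vdb` role (cinder on vdb) and manila on vdc. A tiny sketch of that conventional mapping (the naming rule is the usual virtio-blk convention, not something the template enforces):

```
import string

# Volume order taken from the compute-node definition in underlay.yaml.
volumes = ["system", "cinder", "manila"]

# Usual virtio-blk convention: disks appear as /dev/vda, /dev/vdb, ... in
# attach order (the cloud-init ISO is a cdrom and does not consume a vdX name).
for letter, volume in zip(string.ascii_lowercase, volumes):
    print(f"{volume:7s} -> /dev/vd{letter}")
# manila  -> /dev/vdc, matching manila_lvm_devices above
```
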
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
index 42d9589..081c51d 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -204,28 +208,6 @@
         ens4:
           role: single_ctl
 
-    share02.mcp-queens-dvr.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-queens-dvr.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
     dns01.mcp-queens-dvr.local:
       reclass_storage_name: openstack_dns_node01
       roles:
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
index bb1185e..293863a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
@@ -12,6 +12,8 @@
 
 {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
 {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
 {{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
index c8875a4..414187b 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
@@ -5,8 +5,6 @@
 
 {{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
 
-{{  SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
 {{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
 
 {{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
index 18ea180..f6d9b98 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
@@ -34,8 +34,6 @@
 {% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -64,12 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -95,12 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -127,11 +121,9 @@
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -157,12 +149,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+130, +220]
 
@@ -274,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -315,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -344,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -607,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -637,6 +615,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -688,6 +672,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -752,58 +742,6 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
           - name: {{ HOSTNAME_DNS01 }}
             role: salt_minion
             params:
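
With `features_lvm_backend_volume_vdb` on the computes and the 50 GB cinder / 20 GB manila disks added above, the LVM backends expect volume groups on those raw devices. A hedged sketch of the preparation those backends rely on; in the real deployment the salt formulas drive this, not a script:

```
import subprocess

def make_vg(vg_name, device):
    """Create an LVM volume group on a raw disk, as the cinder/manila LVM
    backends expect (e.g. 'manila-volume' on /dev/vdc). Illustrative only:
    the actual deployment performs this through salt states."""
    subprocess.run(["pvcreate", device], check=True)   # init physical volume
    subprocess.run(["vgcreate", vg_name, device], check=True)

# Matches manila_lvm_volume_name / manila_lvm_devices in the context above.
make_vg("manila-volume", "/dev/vdc")
```
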
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
index a3707d1..18a8beb 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
@@ -58,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -109,9 +108,9 @@
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
   openstack_dns_hostname: dns
-  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_address: 172.16.10.113
   openstack_dns_node01_hostname: dns01
-  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_address: 172.16.10.114
   openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
@@ -208,15 +207,9 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
index 2be8edf..1593d43 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -225,25 +229,3 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-
-    share02.mcp-queens-ovs.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-queens-ovs.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
index 6b36603..739c58c 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
@@ -12,6 +12,8 @@
 
 {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+
 {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
 {{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
index 07eb3af..5ab3fd0 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
@@ -5,8 +5,6 @@
 
 {{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
 
-{{  SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
-
 {{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
 
 {{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
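
The new bootcmd entries make cfg01 self-contained: they pre-create the reclass nodes directory, enable and start salt-master/salt-minion, and block until the local minion answers test.ping. A rough hand-run equivalent for debugging a stuck cfg01 (same commands, just outside cloud-init) would be:

```
mkdir -p /srv/salt/reclass/nodes
systemctl enable --now salt-master salt-minion   # enable + start in one step
salt-call -l info --timeout=120 test.ping        # must succeed before provisioning continues
```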
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
index ac187d1..6ea4098 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
@@ -34,8 +34,6 @@
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 template:
   devops_settings:
     env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
@@ -63,12 +61,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -94,12 +90,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -125,12 +119,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -156,12 +148,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+130, +220]
 
@@ -273,9 +263,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -314,9 +301,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -343,9 +327,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -606,9 +587,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -635,6 +613,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -686,6 +670,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                - name: iso  # Volume with name 'iso' will be used
                             # to store the image with cloud-init metadata.
                   capacity: 1
@@ -801,55 +791,3 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
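
The `+N` entries in these address pools are offsets from the pool's network address, so moving share01 from `+69` to `+204` pins it to x.x.x.204 in every pool, matching `openstack_share_node01_address: 172.16.10.204` from the cookiecutter context (and `+113`/`+114` likewise relocate dns01/dns02 out of the old share range). A sketch of the arithmetic, assuming the /24 pools these templates use:

```
POOL_BASE=172.16.10.0                                    # control pool network (a /24 here)
OFFSET=204                                               # the value after '+' in the template
echo "${POOL_BASE%.*}.$(( ${POOL_BASE##*.} + OFFSET ))"  # -> 172.16.10.204
```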
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index c5d17cf..9a830b9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -29,6 +29,14 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+- description: Temporary workaround for removing virtual gtw nodes
+  cmd: |
+    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: Temporary WR for correct bridge name according to environment templates
   cmd: |
     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -39,5 +47,15 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: "WR for PROD-24311"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
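
The PROD-24311 workaround injects the salt_control image URLs with `reclass-tools add-key`. A hedged follow-up check that the keys actually landed (plain grep on the same file the step edits; `${LAB_CONFIG_NAME}` stands in for the rendered template value):

```
grep -nE 'salt_control_(xenial|trusty)_image' \
    /srv/salt/reclass/classes/cluster/${LAB_CONFIG_NAME}/infra/init.yml
```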
 
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index 62da3ec..9970edd 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -32,12 +32,12 @@
     set -e;
     . /root/venv-reclass-tools/bin/activate;
     # Remove rack01 key
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
     # Workaround for compute nodes addresses
-    reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
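
This template and the ones below consistently retarget the workarounds from `infra/config.yml` to `infra/config/init.yml`, following the newer cookiecutter layout where `config` became a directory. When porting similar steps, a small guard like this hypothetical snippet (not part of tcp-qa) picks whichever layout the generated model has:

```
CLUSTER_DIR=/srv/salt/reclass/classes/cluster/${CLUSTER_NAME}   # CLUSTER_NAME is a placeholder
CFG="${CLUSTER_DIR}/infra/config/init.yml"                      # new layout: config/ is a directory
[ -f "${CFG}" ] || CFG="${CLUSTER_DIR}/infra/config.yml"        # fall back to the old single file
echo "workaround steps should patch: ${CFG}"
```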
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
index cf03c60..7cf52a7 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -18,18 +18,4 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-- description: "Workaround for combined roles: remove unnecessary classes"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
-    reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node03_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.103 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node04_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.104 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
index 7eacc05..130b3b3 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -18,18 +18,4 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-- description: "Workaround for combined roles: remove unnecessary classes"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
-    reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node03_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.103 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node04_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.104 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
index 21349bf..65f3c2b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
@@ -18,18 +18,4 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-- description: "Workaround for computes"
-  cmd: |
-    set -e;
-    . /root/venv-reclass-tools/bin/activate;
-    # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
-    reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node03_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.103 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-    reclass-tools add-key parameters._param.kubernetes_compute_node04_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.104 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
-
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
index 6d36cfd..e31a230 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
@@ -21,23 +21,31 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # set wider cpu mask for DPDK
-    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # set virtual disks for compute
-    sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
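
The new DPDK values split host CPUs between OVS roles: `0x41` is binary 1000001 (cores 0 and 6) for the lcore mask, `0xe` is binary 1110 (cores 1-3) for the PMD threads, and `512,512` grants 512 MB of hugepages per NUMA socket. A small sketch for decoding any such mask (plain bash arithmetic, no external tools):

```
decode_mask() {                      # print the core numbers a hex CPU mask selects
    local mask=$(( $1 )) core=0
    while [ "${mask}" -ne 0 ]; do
        [ $(( mask & 1 )) -eq 1 ] && printf '%s ' "${core}"
        mask=$(( mask >> 1 )); core=$(( core + 1 ))
    done; echo
}
decode_mask 0x41   # -> 0 6     (compute_ovs_dpdk_lcore_mask)
decode_mask 0xe    # -> 1 2 3   (compute_ovs_pmd_cpu_mask)
```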
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
index 2b2a2e3..55d6a8b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
@@ -21,10 +21,10 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -32,25 +32,24 @@
     # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Temporary workaround for removing cinder-volume from CTL nodes
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
   cmd: |
     sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
     sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
-  
 
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
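
The recurring cinder-volume workaround strips the two `system.cinder.volume.*` class references from control.yml with `sed`; `skip_fail: true` tolerates models where they were never present. A hedged verification that could follow it (grep on the same file; `${CLUSTER_NAME}` is a placeholder):

```
CONTROL=/srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/openstack/control.yml
if grep -qE 'system\.cinder\.volume\.(single|notification\.messagingv2)' "${CONTROL}"; then
    echo "cinder-volume classes are still referenced on the CTL nodes" >&2
fi
```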
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
index d189cce..c9961c2 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
@@ -21,26 +21,34 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # Bind9 services are placed on the first two ctl nodes
     # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
new file mode 100644
index 0000000..0c7d928
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-queens-dvr-sl.yaml
@@ -0,0 +1,55 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-queens-dvr-sl' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-queens-dvr-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # Workaround for missing reclass.system for the dns role
+    # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
new file mode 100644
index 0000000..4cdda3b
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr-ceph.yaml
@@ -0,0 +1,94 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-dvr-ceph' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr-ceph.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
+{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + " " + REPOSITORY_SUITE + " main") %}
+{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
+{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
+{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
+{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+
+    # Bind9 services are placed on the first two ctl nodes
+    salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "Disable designate worker for Mitaka release"
+  cmd: |
+    set -e;
+    salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_bind9/init.yml
+    salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate_pool_manager/init.yml
+    salt-call reclass.cluster_meta_set name='designate_worker_enabled' value='false' file_name=/srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/designate/init.yml
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: "Temporary workaround: reduce Ceph pg_num/pgp_num for the small lab (needs a proper fix or debugging)"
+  cmd: |
+    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
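
The pg_num/pgp_num reduction above keeps the per-OSD placement-group count within Ceph's limits on this tiny lab, where 128 PGs per pool would be far too many for the handful of virtual OSDs. A quick check that the sed really rewrote the model (run on cfg01; `${CLUSTER_NAME}` is a placeholder):

```
grep -nE 'pgp?_num' /srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/ceph/setup.yml
# expected after the workaround: pg_num: 4 / pgp_num: 4
```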
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
index 8a1031a..a54ce3d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
@@ -34,11 +34,11 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -47,21 +47,16 @@
     salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
index 91eafe9..bd28102 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
@@ -32,11 +32,11 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -45,21 +45,16 @@
     salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
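
Every workaround step in this change is repointed from infra/config.yml to infra/config/init.yml, because the generated cluster models now keep the config node definitions in infra/config/init.yml. A minimal sketch, not part of the change itself, of how the same repointing could be applied mechanically across the templates (assuming GNU sed and this repository layout):

    grep -rl 'infra/config\.yml' tcp_tests/templates/cookied-model-generator/ \
        | xargs sed -i 's|infra/config\.yml|infra/config/init.yml|g'
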
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
index 3a0f213..948b051 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
@@ -24,11 +24,11 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -37,21 +37,16 @@
     salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
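
All of the del-key calls above follow one pattern: activate the reclass-tools venv prepared on cfg01, then drop a single dotted-path key from a reclass YAML file. A minimal sketch of one such call (CLUSTER_NAME stands in for the rendered {{ SHARED.CLUSTER_NAME }} value):

    # Remove one node definition key from the generated cluster model.
    . /root/venv-reclass-tools/bin/activate
    reclass-tools del-key \
        parameters.reclass.storage.node.stacklight_log_node01 \
        /srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/infra/config/init.yml
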
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
index a9da336..ee24ff1 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
@@ -24,11 +24,11 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     # Bind9 services are placed on the first two ctl nodes
@@ -36,21 +36,16 @@
     salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
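
This change also drops the PROD-20945 block that attached a loopback-backed cinder-volume to the compute role. A hedged way to check that the generated compute role no longer carries those classes (CLUSTER_NAME is a placeholder):

    # Expect no matches once the PROD-20945 workaround is gone.
    grep -E 'system\.(cinder\.volume|linux\.storage\.loopback)' \
        /srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/openstack/compute/init.yml \
        || echo 'no loopback cinder-volume classes on compute'
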
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index b92f638..7adb184 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -4,7 +4,7 @@
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # See shared-salt.yaml for other salt model repository parameters
 
-{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-dvr' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr' %}
 # Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
 # Path to the context files used to render Cluster and Environment models
@@ -24,11 +24,11 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -37,21 +37,16 @@
     salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
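
With LAB_CONFIG_NAME switched above, this template now resolves its context files under the cookied-mcp-ocata-dvr name. The os_env() default used for ENVIRONMENT_MODEL_INVENTORY_NAME behaves like a shell parameter fallback; a rough shell equivalent, for illustration only:

    # Falls back to the template's LAB_CONFIG_NAME when unset in the environment.
    export ENVIRONMENT_MODEL_INVENTORY_NAME="${ENVIRONMENT_MODEL_INVENTORY_NAME:-cookied-mcp-ocata-dvr}"
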
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 8a984f8..0d0bd6b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -4,7 +4,7 @@
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # See shared-salt.yaml for other salt model repository parameters
 
-{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-ovs' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-ovs' %}
 # Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
 # Path to the context files used to render Cluster and Environment models
@@ -24,11 +24,11 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     # Bind9 services are placed on the first two ctl nodes
@@ -36,21 +36,16 @@
     salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
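
The salt-call lines above use the reclass.cluster_meta_set module to write one name/value pair at a time into the cluster model, here binding the Bind9 DNS addresses to the first two ctl nodes. A minimal sketch of the pattern (CLUSTER_NAME is a placeholder for the rendered value):

    salt-call reclass.cluster_meta_set \
        name='openstack_dns_node01_address' \
        value='${_param:openstack_control_node01_address}' \
        file_name=/srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/openstack/init.yml
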
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
index 4bae834..b59248a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -21,27 +21,35 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # set wider cpu mask for DPDK
-    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # set virtual disks for compute
-    sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
@@ -55,4 +63,13 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
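
The DPDK tuning above is mask arithmetic: compute_ovs_dpdk_lcore_mask 0x41 is binary 1000001, which pins the OVS lcore threads to cores 0 and 6; compute_ovs_pmd_cpu_mask 0xe is binary 1110, which gives the PMD polling threads cores 1-3; and compute_ovs_dpdk_socket_mem "512,512" reserves 512 MB of hugepage memory per NUMA socket. A small bash sketch for decoding such masks:

    # Prints the CPU cores selected by a DPDK hex core mask.
    decode_mask() {
        local mask=$1 i
        for i in $(seq 0 31); do
            if (( (mask >> i) & 1 )); then echo "core $i"; fi
        done
    }
    decode_mask 0x41   # core 0, core 6
    decode_mask 0xe    # core 1, core 2, core 3
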
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
index a726b96..d4377b7 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
@@ -20,21 +20,21 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
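
As a quick consistency check of the hunk header @@ -20,21 +20,21 @@ above: the hunk removes 12 lines (3 sed deletions plus 9 reclass-tools del-key calls) and adds 12 repointed replacements, so the line count is unchanged: 21 - 12 + 12 = 21, matching the header.
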
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
new file mode 100644
index 0000000..4b86b85
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl-barbican.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl-barbican' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl-barbican.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
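
Besides the usual del-key cleanup, the new barbican template pins cluster_internal_protocol to https in the system-level cinder volume class, so cinder-volume reaches the internal endpoints over TLS. A hedged post-generation check:

    # Confirm the system-level class now carries the https override.
    grep -n 'cluster_internal_protocol' \
        /srv/salt/reclass/classes/system/cinder/volume/single.yml
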
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
new file mode 100644
index 0000000..c7de965
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ssl.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ssl' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
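
Note that the cinder-volume removal step blanks the matched class text with s///g, leaving empty list lines behind in control.yml (harmless to the YAML parser). An equivalent, slightly cleaner sketch, not what the change itself does, would delete the whole lines instead (CLUSTER_NAME is a placeholder):

    sed -i -e '/system\.cinder\.volume\.single/d' \
           -e '/system\.cinder\.volume\.notification\.messagingv2/d' \
        /srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/openstack/control.yml
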
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index e1bbc0a..2f19cd5 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -21,35 +21,24 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # Workaround of missing reclass.system for dns role
-    # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index d1d8222..ed3a6c9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -21,33 +21,24 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
-    # Bind9 services are placed on the first two ctl nodes
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
index b7f1c59..657e7c2 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
@@ -20,21 +20,21 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
new file mode 100644
index 0000000..4e5dbc9
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl-barbican.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ssl-barbican' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr-ssl-barbican.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary workaround to remove cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
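The sed workaround above strips `system.cinder.volume.single` and the messagingv2 notification class from control.yml. A quick check that nothing was left behind (a hypothetical verification step, with CLUSTER_NAME standing in for the rendered Jinja value):
```
# Fails (non-zero exit) if any cinder-volume class is still listed on CTL.
CTRL="/srv/salt/reclass/classes/cluster/${CLUSTER_NAME}/openstack/control.yml"
! grep -E 'system\.cinder\.volume\.(single|notification\.messagingv2)' "${CTRL}"
```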
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
index 7457eeb..bf6683d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
@@ -21,29 +21,21 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools add-key parameters._param.cluster_internal_protocol 'https' /srv/salt/reclass/classes/system/cinder/volume/single.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
index 4732a9a..1e50429 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
@@ -21,29 +21,20 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
index 6434349..7e2d2de 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
@@ -21,29 +21,20 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index 7bd7a02..695e537 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -18,10 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   - cloud-init-per once sudo ifdown ens4
-
    # Enable root access
    - cloud-init-per once sudo sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - cloud-init-per once sudo service sshd restart
@@ -32,26 +28,21 @@
   runcmd:
    # Prepare network connection
    - sudo ifdown ens3
+   - sudo ifdown ens4
+   - sudo ip r d default || true  # remove the existing default route so it is re-learned from DHCP
    - sudo ifup ens3
+   - sudo ifup ens4
    #- sudo route add default gw {gateway} {interface_name}
 
-   # Purge the unattended-upgrades package (Workaround for PROD-17904, PROD-18736)"
-   - echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
-   - echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
-   - echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
-   - apt-get -y purge unattended-upgrades
-   # Stop currently running apt-daily service, source: https://unix.stackexchange.com/a/315517
-   - systemctl stop apt-daily.service
-   - systemctl kill --kill-who=all apt-daily.service
-   - while ! (systemctl list-units --all apt-daily.service | fgrep -q dead); do sleep 1; done
-
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
    # Create swap
    #- fallocate -l 16G /swapfile
    #- chmod 600 /swapfile
@@ -62,26 +53,10 @@
    ############## TCP Cloud cfg01 node ##################
    - echo "Preparing base OS"
 
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
-   - apt-get clean
-   - apt-get update
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
 
    # Ensure that the salt-master service is ready to receive requests
    - salt-key -y -D
-   - service salt-master restart
-   - service salt-minion restart
-   - apt-get install -y salt-formula-*
-   - for f in $(ls -1 /usr/share/salt-formulas/reclass/service); do ln -s /usr/share/salt-formulas/reclass/service/$f /srv/salt/reclass/classes/service/ || true; done
-   - salt-call --timeout=180 test.ping
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - sudo ifup ens4
-   ########################################################
-
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
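The new runcmd block starts salt-master and salt-minion and verifies connectivity with a single 120-second `salt-call test.ping`. A hedged alternative (a sketch, not the template's actual code) retries in shorter intervals so transient startup races do not consume the whole timeout:
```
# Retry the local ping until the minion connects to its master,
# instead of relying on one long timeout.
for i in $(seq 1 12); do
  salt-call -l info --timeout=10 test.ping && break
  sleep 10
done
```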
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index b231ced..e2b63dd 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -72,7 +72,7 @@
 
    {%- if os_env('IRONIC_DNSMASQ_HOSTFILE', '') %}
    - echo "dhcp-hostsfile=/var/lib/libvirt/dnsmasq/{{ IRONIC_ENV_NAME }}.hostsfile" >> /etc/dnsmasq.conf
-   - service dnsmasq restart
+   - service dnsmasq restart && sleep 30
    {%- endif %}
 
    # Enable SNAT to allow internet access for deploying nodes using ironic node as a gateway
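The fixed `sleep 30` gives dnsmasq time to re-read its hostsfile before nodes start deploying. A polling variant (hypothetical; assumes `ss` from iproute2 is available) avoids waiting longer than needed:
```
# Wait until dnsmasq is listening on the DHCP port, up to 30 seconds.
service dnsmasq restart
for i in $(seq 1 30); do
  ss -uln | grep -q ':67 ' && break
  sleep 1
done
```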
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
index 7f8b8ec..dc9f8cd 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay.yaml b/tcp_tests/templates/k8s-ha-calico/underlay.yaml
index eef7bb8..ac11a62 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay.yaml
@@ -145,9 +145,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -186,9 +183,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -215,9 +209,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
index 48577ab..6076ffa 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -65,11 +63,12 @@
    # Install common packages
    - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
 
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
index 2551d12..b87f888 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
@@ -200,9 +200,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 20
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -247,9 +244,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 20
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -276,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 20
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/runtest.yml b/tcp_tests/templates/runtest.yml
index cceaf8b..263dbb0 100644
--- a/tcp_tests/templates/runtest.yml
+++ b/tcp_tests/templates/runtest.yml
@@ -1,10 +1,11 @@
 classes:
 - service.runtest.tempest
+- service.runtest.tempest.services.manila.glance
 parameters:
   _param:
-    runtest_tempest_cfg_dir: /root/test/
+    runtest_tempest_cfg_dir: /tmp/test/
     runtest_tempest_cfg_name: tempest.conf
-    runtest_tempest_public_net: net04_ext
+    runtest_tempest_public_net: public
     tempest_test_target: gtw01*
   neutron:
     client:
@@ -19,25 +20,10 @@
       convert_to_uuid:
         network:
           public_network_id: ${_param:runtest_tempest_public_net}
-      network:
-          floating_network_name: ${_param:runtest_tempest_public_net}
       DEFAULT:
         log_file: tempest.log
-      heat_plugin:
-        floating_network_name: ${_param:runtest_tempest_public_net}
       compute:
-        build_timeout: 600
-        min_microversion: 2.1
-        max_microversion: 2.53
         min_compute_nodes: 2
-        volume_device_name: 'vdc'
-      dns_feature_enabled:
-        api_admin: false
-        api_v1: false
-        api_v2: true
-        api_v2_quotas: true
-        api_v2_root_recordsets: true
-        bug_1573141_fixed: true
       share:
         capability_snapshot_support: True
         run_driver_assisted_migration_tests: False
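`convert_to_uuid` lets the pillar reference the public network by name ('public' above) while tempest.conf receives the UUID. The manual equivalent, assuming an admin openrc has been sourced, would be:
```
# Resolve the network name tempest needs into the UUID written to tempest.conf.
openstack network show public -f value -c id
```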
diff --git a/tcp_tests/templates/shared-ceph.yaml b/tcp_tests/templates/shared-ceph.yaml
index 267e407..ab13cb2 100644
--- a/tcp_tests/templates/shared-ceph.yaml
+++ b/tcp_tests/templates/shared-ceph.yaml
@@ -111,14 +111,14 @@
 {%- endmacro %}
 
 {%- macro CONNECT_CEPH_TO_SERVICES() %}
-- description: Connect ceph to glance
+- description: Setup keyring for glance
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
-- description: Connect ceph to cinder and nova
+- description: Setup keyring for cinder and nova
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring;
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
@@ -126,4 +126,13 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
+
+- description: Setup keyring for gnocchi
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@gnocchi:server' match.pillar 'ceph:common' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@gnocchi:server' state.sls ceph.common,ceph.setup.keyring
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 {%- endmacro %}
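The `match.pillar` guard makes the gnocchi keyring step a no-op on clusters without gnocchi: with no matching minions, a bare `state.sls` would fail the step. The generic form of the pattern, as a sketch:
```
# Run the state only when at least one minion matches the compound target.
target="I@ceph:common and I@gnocchi:server"
if salt -C "${target}" match.pillar 'ceph:common'; then
  salt -C "${target}" state.sls ceph.common,ceph.setup.keyring
fi
```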
diff --git a/tcp_tests/templates/shared-core.yaml b/tcp_tests/templates/shared-core.yaml
index 49eadac..3b1a716 100644
--- a/tcp_tests/templates/shared-core.yaml
+++ b/tcp_tests/templates/shared-core.yaml
@@ -76,9 +76,16 @@
 
 - description: Setup glusterfs on primary controller
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+    -C 'I@glusterfs:server:role:primary' state.sls glusterfs.server.setup -b 1
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 5, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 5}
   skip_fail: false
 
 - description: Check the gluster status
@@ -88,6 +95,20 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Refresh pillar before glusterfs client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:client' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:client' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
 {%- endmacro %}
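`-b 1` batches the primary setup to one minion at a time, and the retry count of 5 absorbs slow volume creation; clients mount only after a pillar refresh. A hypothetical health check between the server and client phases:
```
# Confirm peers joined and volumes exist before clients mount (sketch).
salt -C 'I@glusterfs:server:role:primary' cmd.run 'gluster peer status && gluster volume info'
```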
 
 {%- macro MACRO_INSTALL_RABBITMQ() %}
@@ -165,6 +186,24 @@
 
 {%- endmacro %}
 
+{%- macro MACRO_INSTALL_NGINX() %}
+
+- description: Update certificate files on nginx nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: Install nginx server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
 {%- macro MACRO_INSTALL_MEMCACHED() %}
 
 - description: Install memcached on all controllers
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index 0461358..b48a611 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -40,20 +40,6 @@
   retry: {count: 1, delay: 15}
   skip_fail: false
 
-- description: Mount glusterfs.client volumes (resuires created 'keystone' system user)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls glusterfs.client -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Populate keystone services/tenants/admins
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@keystone:client' state.sls keystone.client
@@ -85,13 +71,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Mount glusterfs.client volumes (resuires created  'glusterfs' system user)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Check glance image-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C "I@keystone:server" cmd.run ". /root/keystonercv3;
@@ -102,6 +81,7 @@
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_NOVA() %}
+
 - description: Install nova service on primary node
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C "I@nova:controller and *01*" state.sls nova.controller
@@ -131,6 +111,15 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
+
+- description: Create nova resources
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:client' match.pillar 'nova:client' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C "I@nova:client" state.sls nova.client
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) %}
@@ -157,11 +146,11 @@
   skip_fail: false
 
   {%- if INSTALL_VOLUME %}
-- description: Install cinder volume
+- description: Install cinder volume, retry 2 per PROD-24485
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@cinder:volume' state.sls cinder
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 5}
   skip_fail: false
   {%- endif %}
 
@@ -231,13 +220,6 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=false, INSTALL_BIND=false) %}
@@ -275,7 +257,27 @@
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_BARBICAN() %}
-# TO DO
+
+- description: Install barbican server
+  cmd: |
+    salt -C 'I@barbican:server:role:primary' state.sls barbican.server;
+    salt -C 'I@barbican:server' state.sls barbican.server;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_DOGTAG() %}
+
+- description: Install dogtag server
+  cmd: |
+    salt -C 'I@dogtag:server:role:master' state.sls dogtag.server;
+    salt -C 'I@dogtag:server' state.sls dogtag.server;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_IRONIC() %}
@@ -290,13 +292,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install manila-api on other nodes
-  cmd: |
-    salt -C 'I@manila:api and not *01*' state.sls manila.api;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Install manila-scheduler
   cmd: |
     salt -C 'I@manila:scheduler' state.sls manila.scheduler;
@@ -337,11 +332,64 @@
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_OCTAVIA_API() %}
-# TO DO
+
+- description: Execute glance client to upload octavia image
+  cmd: salt -C 'I@glance:client' state.sls glance.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute neutron client to create octavia resources
+  cmd: salt -C 'I@neutron:client' state.sls neutron.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install octavia api service on primary node
+  cmd: salt -C 'I@octavia:api:role:primary' state.sls octavia.api
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install octavia api service
+  cmd: salt -C 'I@octavia:api' state.sls octavia.api
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 {%- endmacro %}
 
-{%- macro MACRO_INSTALL_DOGTAG() %}
-# TO DO
+{%- macro MACRO_INSTALL_OCTAVIA_MANAGER() %}
+- description: Update mine
+  cmd: salt -C 'I@neutron:client' mine.update && sleep 60
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install octavia manager
+  cmd: salt -C 'I@octavia:manager' state.sls octavia.manager
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute octavia ca
+  cmd: salt -C 'I@octavia:manager' state.sls salt.minion.ca
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute octavia cert
+  cmd: salt -C 'I@octavia:manager' state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute octavia client
+  cmd: salt -C 'I@octavia:client' state.sls octavia.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 {%- endmacro %}
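`mine.update && sleep 60` publishes the neutron client's mine data and then waits a fixed minute for it to propagate. A polling variant (hypothetical; assumes network.interfaces is among the enabled mine_functions):
```
# Poll until the octavia manager can see the neutron client's mine data.
salt -C 'I@neutron:client' mine.update
for i in $(seq 1 12); do
  salt -C 'I@octavia:manager' mine.get 'I@neutron:client' network.interfaces compound --out=json \
    | grep -q '"ens\|"eth' && break
  sleep 5
done
```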
 
 {%- macro MACRO_INSTALL_COMPUTE(CELL_MAPPING=false) %}
@@ -450,4 +498,4 @@
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
   node_name: {{ HOSTNAME_CFG01 }}
-{%- endmacro %}
\ No newline at end of file
+{%- endmacro %}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 1f4ced5..dcbf3af 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -9,7 +9,9 @@
 {% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
 {% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
 {% set SALT_MODELS_SYSTEM_TAG = os_env('SALT_MODELS_SYSTEM_TAG','') %}
-{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY_USER = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_USER','mcp-gerrit') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH','') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','ssh://' + COOKIECUTTER_TEMPLATES_REPOSITORY_USER +'@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates') %}
 {% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
 {% set COOKIECUTTER_TAG = os_env('COOKIECUTTER_TAG','') %}
 {% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
@@ -365,12 +367,23 @@
   retry: {count: 1, delay: 1}
   skip_fail: false
 
+- description: "Upload {{ COOKIECUTTER_TEMPLATES_REPOSITORY_USER }} key"
+  upload:
+    local_path: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | dirname }}/
+    local_filename: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+    remote_path: /tmp/
+  node_name: {{ HOSTNAME_CFG01 }}
+
 - description: Create cluster model from cookiecutter templates
   cmd: |
     set -e;
     set -x;
     sudo apt-get install python-setuptools -y
     pip install cookiecutter
+
+    chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+    eval $(ssh-agent)
+    ssh-add /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
     export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
 
     {%- if COOKIECUTTER_REF_CHANGE != '' %}
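The eval/ssh-add pair gives the non-interactive clone access to the uploaded gerrit key. An agent-less alternative (a sketch; KEY and REPO are placeholders for the uploaded key path and the repository URL):
```
# Point git at the key directly instead of loading it into an agent.
export GIT_SSH_COMMAND="ssh -i ${KEY} -o StrictHostKeyChecking=no"
git clone "${REPO}" /root/cookiecutter-templates
```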
@@ -519,6 +532,29 @@
   node_name: {{ HOSTNAME_CFG01 }}
 {%- endfor %}
 
+- description: "Replace template addresses to actual environment addresses"
+  cmd: |
+    set -ex;
+    # First replace with intermediate placeholder values to avoid collisions
+    # between already-replaced and not-yet-replaced networks.
+    # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
+    # 192.168.10 -> 10.16.0 (generated admin network)
+    # 10.16.0 -> <external network>
+    # So replace the constant networks with keywords first, then the keywords with the desired networks.
+    export REPLACE_DIRS="/root/environment/"
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
+
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
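The two-phase replacement above in miniature: with a naive single pass, a generated admin prefix of 10.16.0 would first be written into the files and then rewritten by the external-network pass. Placeholders break the chain (hypothetical values):
```
echo "192.168.10.1 10.16.0.1" \
  | sed 's/192\.168\.10/==ADMIN==/g; s/10\.16\.0/==EXT==/g' \
  | sed 's/==ADMIN==/10.16.0/g; s/==EXT==/172.17.16/g'
# -> "10.16.0.1 172.17.16.1"; the naive order would print "172.17.16.1 172.17.16.1"
```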
 - description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
   cmd: |
     set -e;
@@ -761,6 +797,12 @@
 
 {# Prepare salt services and nodes settings #}
 
+- description: '*Workaround* for the hardcoded host from day01 grains'
+  cmd: salt-key -d cfg01.mcp-day01.local -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
+
 - description: Run 'linux' formula on cfg01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux;
   node_name: {{ HOSTNAME_CFG01 }}
@@ -777,12 +819,6 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
-- description: '*Workaround* of harcoded host from day01 grains'
-  cmd: salt-key -d cfg01.mcp-day01.local  -y
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: true
-
 - description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
diff --git a/tcp_tests/templates/shared-sl.yaml b/tcp_tests/templates/shared-sl.yaml
index 413162b..34c42f0 100644
--- a/tcp_tests/templates/shared-sl.yaml
+++ b/tcp_tests/templates/shared-sl.yaml
@@ -67,14 +67,14 @@
   skip_fail: false
 {%- endmacro %}
 
-{%- macro MACRO_INSTALL_GLUSTERFS_CLIENT() %}
-- description: Install glusterfs client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:client' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-{%- endmacro %}
+#{#%- macro MACRO_INSTALL_GLUSTERFS_CLIENT() %#}
+#- description: Install glusterfs client
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+#    -C 'I@glusterfs:client' state.sls glusterfs.client
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 2, delay: 15}
+#  skip_fail: false
+#{#%- endmacro %#}
 
 {%- macro MACRO_INSTALL_MONGODB() %}
 # Install slv2 infra
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
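
Editor's note: the added cloud-init commands replace the old sleep-then-unblock-SSH dance: the config node now enables and starts its own salt-master and salt-minion, and the final `salt-call test.ping` acts as a readiness gate, returning only once the minion can reach the local master. The same six lines are added to each of the other underlay--user-data-cfg01.yaml templates below. A hedged sketch of the same gate written as an explicit retry loop:

```bash
# Equivalent readiness gate as a loop (sketch): keep pinging through the
# local master until the minion key is accepted and the ping succeeds.
for i in $(seq 1 24); do
    salt-call -l quiet --timeout=5 test.ping && break
    sleep 5
done
```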
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
deleted file mode 100644
index 9d07fb0..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/core.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
deleted file mode 100644
index 85afc89..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/core.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
deleted file mode 100644
index 3a3ed3a..0000000
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
-{% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-ovs/overrides.yml') %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "glusterfs" "jenkins" "maas" "backupninja" "fluentd" "auditd" "logrotate"') }}
-
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{%- if OVERRIDES != '' %}
-{%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
-- description: Override cluster parameters
-  cmd: |
-    salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-{%- endfor %}
-
-- description: Refresh pillar
-  cmd: salt '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-{%- endif %}
-
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-# WORKAROUND PROD-21071
-- description: Set correct pin for openstack repository
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=ocata/Pin: release l=xenial\/openstack\/ocata testing/g' /etc/apt/preferences.d/mirantis_openstack"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
index 7ac814c..3a24e5e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/core.yaml
@@ -1,118 +1,12 @@
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
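
Editor's note: this core.yaml (and the five analogous core.yaml files below) now just expands macros from shared-core.yaml instead of carrying ~100 duplicated lines each. A quick pre-render sanity check (a sketch, run from the repository root; assumes the macros are defined in shared-core.yaml as `{%- macro NAME() %}`) is to confirm every referenced macro actually exists:

```bash
# Fail loudly if core.yaml references a macro that shared-core.yaml lacks.
for m in MACRO_INSTALL_KEEPALIVED MACRO_INSTALL_GLUSTERFS MACRO_INSTALL_RABBITMQ \
         MACRO_INSTALL_GALERA MACRO_INSTALL_HAPROXY MACRO_INSTALL_NGINX \
         MACRO_INSTALL_MEMCACHED MACRO_CHECK_VIP; do
    grep -q "macro ${m}(" tcp_tests/templates/shared-core.yaml \
        || echo "missing macro: ${m}"
done
```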
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
index 965d297..5c35319 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/core.yaml
@@ -1,118 +1,12 @@
 {% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
index be74a88..63fb199 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
@@ -46,6 +46,13 @@
 
    - echo "nameserver 172.18.176.6" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
index 3ec4687..a2d4be8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/core.yaml
@@ -1,118 +1,12 @@
 {% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
index 57968ee..49b016a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/core.yaml
@@ -1,117 +1,12 @@
 {% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
index cbde3f0..5c35319 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/core.yaml
@@ -1,117 +1,12 @@
 {% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
index c0e9b0e..3aed7e6 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/core.yaml
@@ -1,117 +1,12 @@
 {% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
index 368f2bf..cf4a90a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/core.yaml
@@ -1,117 +1,12 @@
 {% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
index abaa50d..af8778d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/core.yaml
@@ -1,117 +1,12 @@
 {% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index 2bf4bb3..a1b2c92 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -21,7 +21,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
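The `split(':', 1)` change above (and the identical one in virtual-mcp-trusty/salt.yaml below) matters whenever an override value itself contains a colon, e.g. a URL: the old `split(':')` produced more than two fields and the two-name unpacking failed. A small Python sketch of the loop's semantics; the sample OVERRIDES value is illustrative:
```
# One `key: value` override per line; maxsplit=1 keeps everything after the
# first colon as the value instead of failing to unpack.
OVERRIDES = """\
openstack_log_appender: true
some_service_url: http://cfg01:4505
"""

for param in OVERRIDES.splitlines():
    key, value = param.replace(' ', '').split(':', 1)
    # with plain split(':') the URL line would yield 4 fields -> ValueError
    print(f"cluster_meta_set name={key!r} value={value!r}")
```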
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
index a8afd05..1018c28 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
index fc9f978..da47d0b 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
+++ b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
@@ -2,4 +2,14 @@
 - virtual-mcp-mitaka-dvr-trusty
 - virtual-mcp-mitaka-ovs-trusty
 
-Used by maintenance team.
\ No newline at end of file
+Used by maintenance team.
+
+The following env vars should be used:
+SALT_MODELS_COMMIT = 'fa85f84'
+SALT_MODELS_SYSTEM_TAG = '2018.8.0'
+REPOSITORY_SUITE = '2018.8.0'
+OVERRIDES = 'openstack_log_appender: true
+linux_system_repo_mk_openstack_version: testing
+'
+
+Also, the VCP 2018.8.0 images should be used.
\ No newline at end of file
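Since the Readme now pins exact model and repository versions, a hypothetical launcher can export them before the run. The names and values below come straight from the Readme; the helper itself is illustrative:
```
pinned = {
    "SALT_MODELS_COMMIT": "fa85f84",
    "SALT_MODELS_SYSTEM_TAG": "2018.8.0",
    "REPOSITORY_SUITE": "2018.8.0",
    "OVERRIDES": ("openstack_log_appender: true\n"
                  "linux_system_repo_mk_openstack_version: testing\n"),
}

# Emit shell-compatible exports for the mitaka-trusty test job.
for name, value in pinned.items():
    print(f"export {name}={value!r}")
```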
diff --git a/tcp_tests/templates/virtual-mcp-trusty/core.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
index 938b11f..a433aee 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
@@ -1,6 +1,9 @@
 {% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
+# vkhlyunev: the shared steps are constantly updated due to master development,
+# so we can't use them for an old release (e.g. the new gluster steps use the
+# glusterfs:server:role:primary pillar for targeting, which does not exist in
+# the 2018.8.0 release model, and we can't update the model).
 
 # Install support services
 - description: Install keepalived on ctl01
@@ -17,7 +20,33 @@
   retry: {count: 1, delay: 10}
   skip_fail: true
 
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service && sleep 20
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status && gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 
 - description: Install RabbitMQ on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
index c001d39..fff0966 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
@@ -6,6 +6,10 @@
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
+# vkhlyunev: the shared steps are constantly updated due to master development,
+# so we can't use them for an old release. For openstack.yaml we can use some
+# shared steps for now, but TODO: bind the deployment workflow to the 2018.8.0 state.
+
 # Install OpenStack control services
 - description: Sync time
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -G 'oscodename:trusty' cmd.run "service ntp stop && ntpdate pool.ntp.org && service ntp start"
@@ -23,11 +27,94 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+- description: Install keystone service on primary node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server and *01*' state.sls keystone.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Install keystone service on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+    cmd.run "service apache2 restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+    cmd.run "service apache2 status"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the created 'keystone' system user)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls glusterfs.client -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack service list"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+- description: Install nova service on primary node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C "I@nova:controller and *01*" state.sls nova.controller
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install nova service on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C "I@nova:controller" state.sls nova.controller
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack compute service list"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check nova list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack server list"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
 
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
index 7213162..d8f7fb7 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -10,6 +10,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+# vkhlyunev: sometimes we have to verify fixes for mitaka OpenStack based on an
+# ubuntu trusty OS deployment. The last known deployable configuration is based on
+# the mcp-virtual-lab/salt-formulas/reclass-system parameters/commits/tags listed below:
+# SALT_MODELS_COMMIT = 'fa85f84'
+# SALT_MODELS_SYSTEM_TAG = '2018.8.0'
+# REPOSITORY_SUITE = '2018.8.0'
+# OVERRIDES = 'openstack_log_appender: true
+# linux_system_repo_mk_openstack_version: testing'
+
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
@@ -22,7 +31,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
@@ -30,13 +39,27 @@
   retry: {count: 1, delay: 1}
   skip_fail: false
 {%- endfor %}
+{%- endif %}
+
+# vkhlyunev: the fa85f84 model commit contains sphinx on the cfg01 node,
+# which is not required for mitaka-trusty testing. Unfortunately we cannot fix
+# it in the model itself because the models are constantly updated to follow
+# the development of the main release.
+- description: Apply sphinx workaround - delete system.sphinx class
+  cmd: sed -i -e '/system.sphinx/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: Apply sphinx workaround - delete nginx section
+  cmd: sed -i -e '/  nginx:/,+8d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
 
 - description: Refresh pillar
   cmd: salt '*' saltutil.refresh_pillar
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
-{%- endif %}
 
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
index 8ba7026..0a43183 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/core.yaml
@@ -1,5 +1,6 @@
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
 - description: remove apparmor
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -8,118 +9,11 @@
   retry: {count: 1, delay: 10}
   skip_fail: true
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
index 6546438..68a1220 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
@@ -5,14 +5,14 @@
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
 # Install OpenStack control services
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
 {% import 'shared-backup-restore.yaml' as BACKUP with context %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
 
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
index 9f28ba9..fc45c30 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
@@ -12,9 +12,27 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
 
-  # Upload cirros image
+- description: Run 'openssh' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls openssh &&
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh reload"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Configure openssh on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh reload"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 - description: Upload cirros image on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
     'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
@@ -88,7 +106,7 @@
   skip_fail: false
 
 - description: Install docker-ce on gtw
-  cmd: salt-call cmd.run 'apt-get install docker-ce -y'
+  cmd: salt-call cmd.run 'apt-get install docker-ce -y --allow-unauthenticated'
   node_name: {{ HOSTNAME_GTW01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
@@ -102,13 +120,13 @@
   skip_fail: false
 
 - description: create rc file on cfg
-  cmd: scp ctl01:/root/keystonercv3 /root
+  cmd: scp -o StrictHostKeyChecking=no ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
 
 - description: Copy rc file
-  cmd: scp /root/keystonercv3 gtw01:/root
+  cmd: scp -o StrictHostKeyChecking=no /root/keystonercv3 gtw01:/root
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
index 420b805..bb1316a 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
@@ -37,9 +37,9 @@
 export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
 #export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
 export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
 
 cd tcp_tests
 py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
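The repository exports above (the same three-line change recurs in the other run_test.sh files below) move the Ubuntu mirrors from mirror.mcp.mirantis.local.test to mirror.mirantis.local.test with a suite-first path. A Python sketch of how the final apt lines are assembled; the 'proposed' default mirrors the salt.yaml templates in this change:
```
import os

suite = os.environ.get("REPOSITORY_SUITE", "proposed")
base = f"http://mirror.mirantis.local.test/{suite}/ubuntu"

repos = {
    "UBUNTU_REPOSITORY": f"deb {base} xenial main universe restricted",
    "UBUNTU_UPDATES_REPOSITORY": f"deb {base} xenial-updates main restricted universe",
    "UBUNTU_SECURITY_REPOSITORY": f"deb {base} xenial-security main restricted universe",
}

for name, line in repos.items():
    print(f'export {name}="{line}"')
```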
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index d671337..828a14f 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -18,6 +18,7 @@
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_VS with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
 
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
@@ -40,6 +41,7 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_VS) }}
 {{ VSWITCH.MACRO_CONFIGURE_VSWITCH(HOSTNAME_VS, VSWITCH_IP) }}
 
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
index 705e9be..df9fd73 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/vswitch-config.yaml
@@ -1,6 +1,21 @@
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+{% set OPENSTACK_PIKE_REPOSITORY = os_env('OPENSTACK_PIKE_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE + "/openstack-pike/xenial/ xenial main") %}
+{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
+{% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
+
 
 {%- macro MACRO_CONFIGURE_VSWITCH(NODE_NAME, IP) %}
 {#################################################}
+- description: 'Enable the openstack repo for needed packages'
+  cmd: |
+    apt-key adv --keyserver "{{ UBUNTU_KEY_SERVER }}" --recv-keys "{{ UBUNTU_KEY_ID }}"
+    echo "{{ OPENSTACK_PIKE_REPOSITORY }}" > /etc/apt/sources.list.d/openstack.list
+    eatmydata apt-get clean;
+    apt-get update;
+    sync;
+  node_name: {{ NODE_NAME }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 
 - description: 'Install openvswitch-vtep package and configure it'
   cmd: |
@@ -8,9 +23,9 @@
     ifconfig ens4 up
 
     apt-get update
-    apt-get -y install openvswitch-switch
+    apt-get -y install openvswitch-switch --allow-unauthenticated
     service openvswitch-switch stop
-    apt-get -y install openvswitch-vtep bridge-utils
+    apt-get -y install openvswitch-vtep bridge-utils --allow-unauthenticated
 
     ovsdb-tool create /etc/openvswitch/vtep.db /usr/share/openvswitch/vtep.ovsschema
     ovsdb-tool create /etc/openvswitch/vswitch.db /usr/share/openvswitch/vswitch.ovsschema
@@ -67,7 +82,13 @@
   skip_fail: false
 
 - description: 'Refresh pillar data after L2GW enablement'
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar;  sleep 15
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: 'Sync all'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -75,7 +96,7 @@
 - description: 'Check L2GW is enabled'
   cmd: salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep True
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 {%- endmacro %}
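The tweaks above lean on the step runner's `retry: {count, delay}` fields: refresh_pillar now settles for 15 s, a sync_all step follows, and the L2GW check retries three times. A generic Python sketch of the assumed retry semantics (illustrative, not the runner's actual code):
```
import subprocess
import time

def run_step(cmd, count=1, delay=5, skip_fail=False):
    """Run `cmd` up to `count` times, waiting `delay` seconds between tries."""
    for attempt in range(1, count + 1):
        if subprocess.run(cmd, shell=True).returncode == 0:
            return True
        if attempt < count:
            time.sleep(delay)
    if skip_fail:
        return False
    raise RuntimeError(f"step failed after {count} attempts: {cmd}")

# e.g. the tightened L2GW check: up to 3 attempts, 5 s apart
run_step("salt 'gtw01*' pillar.get neutron:gateway:l2gw:enabled | grep True",
         count=3, delay=5)
```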
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
index b3a1404..4d69c89 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/core.yaml
@@ -1,5 +1,7 @@
 {% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
 - description: remove apparmor
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -7,118 +9,11 @@
   retry: {count: 1, delay: 10}
   skip_fail: true
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 25fd59f..d362573 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -6,12 +6,13 @@
 {% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs') %}
 {% import 'shared-backup-restore.yaml' as BACKUP with context %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
+
 # Install OpenStack control services
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
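The DOCKER_LOCAL_REPO default above follows the same mirror-layout move: the suite now leads the path (`<suite>/docker/xenial`) and the apt distribution field is plain `xenial`. A Python sketch mirroring the Jinja `os_env(...)` default (illustrative):
```
import os

suite = os.environ.get("REPOSITORY_SUITE", "proposed")
docker_local_repo = os.environ.get(
    "DOCKER_LOCAL_REPO",
    "deb [arch=amd64] "
    f"http://mirror.mcp.mirantis.local.test/{suite}/docker/xenial xenial stable",
)
print(docker_local_repo)
```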
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
index d1e0380..a33e90f 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
@@ -37,9 +37,9 @@
 export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
 #export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
 export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
 
 cd tcp_tests
 py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-offline-ssl/core.yaml b/tcp_tests/templates/virtual-offline-ssl/core.yaml
index 0c75bb4..c08b0cd 100644
--- a/tcp_tests/templates/virtual-offline-ssl/core.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/core.yaml
@@ -1,5 +1,7 @@
 {% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
 - description: remove apparmor
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -7,118 +9,11 @@
   retry: {count: 1, delay: 10}
   skip_fail: true
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
index 23541d0..e84ed31 100644
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
@@ -15,8 +15,7 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
-{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
-
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/' + REPOSITORY_SUITE + '/docker/xenial xenial stable') %}
 
 # Install OpenStack control services
 
diff --git a/tcp_tests/templates/virtual-offline-ssl/run_test.sh b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
index 1695eae..747f959 100755
--- a/tcp_tests/templates/virtual-offline-ssl/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
@@ -35,10 +35,11 @@
 export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
 export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
 export SALT_REPOSITORY = "deb [arch=amd64] http://mirror.mirantis.local.test/" + REPOSITORY_SUITE+ "/saltstack-" + SALT_VERSION+ "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main"
+#export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2017.7 main"
 export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main restricted universe"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main restricted universe"
 
 cd tcp_tests
 py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
index 050c4c4..5da2666 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
@@ -17,18 +17,18 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # Start compute node addresses from .105 , as in static models
-    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
index 202a7e2..11a7665 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/core.yaml
@@ -1,117 +1,12 @@
 {% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
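+{# Shared macros from shared-core.yaml that install and verify the core services previously inlined in this file #}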
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
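+   # Enable and start the Salt master/minion so salt-call is usable during bootstrap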
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/tests/system/test_3rdparty_suites.py b/tcp_tests/tests/system/test_3rdparty_suites.py
index d545532..78583af 100644
--- a/tcp_tests/tests/system/test_3rdparty_suites.py
+++ b/tcp_tests/tests/system/test_3rdparty_suites.py
@@ -33,7 +33,7 @@
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_tempest
     def test_run_tempest(self, tempest_actions, show_step, _):
-        """Runner for Juniper contrail-tests
+        """Runner for Openstack tempest tests
 
         Scenario:
             1. Run tempest
@@ -83,11 +83,16 @@
         k8s_actions.run_conformance()
 
     @pytest.mark.grab_versions
+    @pytest.mark.extract(container_system='docker',
+                         extract_from='mirantis/virtlet',
+                         files_to_extract=['conformance_virtlet_result.xml'])
     @pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
-                                        'report.xml'])
+                                        'conformance_virtlet_result.xml'])
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.k8s_conformance_virtlet
     def test_run_k8s_conformance_virtlet(self, show_step, config, k8s_actions,
                                          k8s_logs, _):
         """Test run of k8s virtlet conformance tests"""
-        k8s_actions.run_virtlet_conformance()
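+        # Run the extended suite; the report name must match files_to_extract above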
+        config.k8s.run_extended_virtlet_conformance = True
+        k8s_actions.run_virtlet_conformance(
+            report_name="conformance_virtlet_result.xml")
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index ca5c116..6399eb3 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -107,9 +107,7 @@
         assert len(first_node_ips) > 0, "Couldn't find first k8s node IP!"
         first_node_names = [name for name in underlay.node_names()
                             if name.startswith(first_node.name)]
-        assert len(first_node_names) == 1, "Couldn't find first k8s node " \
-                                           "hostname in SSH config!"
-        first_node_name = first_node_names.pop()
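+        # Use the first SSH config hostname that matches this node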
+        first_node_name = first_node_names[0]
 
         target_pod_ip = None
 
diff --git a/tcp_tests/tests/system/test_failover_k8s.py b/tcp_tests/tests/system/test_failover_k8s.py
index a334a42..b872c36 100644
--- a/tcp_tests/tests/system/test_failover_k8s.py
+++ b/tcp_tests/tests/system/test_failover_k8s.py
@@ -21,7 +21,7 @@
 
 class TestFailoverK8s(object):
 
-    @pytest.mark.grap_versions
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_k8s_master_vip_migration(self, show_step, k8s_deployed, underlay,
                                       k8s_actions, core_actions,
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index 8066cd9..ba5a81d 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -106,7 +106,11 @@
             # todo (tleontovich) add asserts here and extend the tests
             # with acceptance criteria
         show_step(10)
+
         # Run SL component tests
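+        # Prepare the SL test suite on cfg01 before running it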
+        stacklight_deployed.setup_sl_functional_tests(
+                'cfg01',
+        )
         stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
@@ -163,6 +167,9 @@
         stacklight_deployed.check_prometheus_targets(mon_nodes)
         show_step(6)
         # Run SL component tests
+        stacklight_deployed.setup_sl_functional_tests(
+                'cfg01',
+        )
         stacklight_deployed.run_sl_functional_tests(
             'cfg01',
             '/root/stacklight-pytest/stacklight_tests/',
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 56efe59..a34496b 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -298,7 +298,7 @@
             tgt='*', fun='cmd.run',
             args='service ntp stop; ntpd -gq; service ntp start')
         if settings.RUN_TEMPEST:
-            tempest_actions.prepare_and_run_tempest(dpdk=True)
+            tempest_actions.prepare_and_run_tempest()
 
         LOG.info("*************** DONE **************")
 
@@ -394,7 +394,7 @@
                 result)
 
         # Prepare resources before test
-        steps_path = config.openstack_deploy.penstack_resources_steps_path
+        steps_path = config.openstack_deploy.openstack_resources_steps_path
         commands = underlay.read_template(steps_path)
         openstack_actions.install(commands)
 
diff --git a/tcp_tests/tests/system/test_install_mcp_queens.py b/tcp_tests/tests/system/test_install_mcp_queens.py
new file mode 100644
index 0000000..b644d26
--- /dev/null
+++ b/tcp_tests/tests/system/test_install_mcp_queens.py
@@ -0,0 +1,220 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+class TestMcpInstallQueensCeph(object):
+    """Test class for testing mcp queens ceph deploy"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_dvr_ceph
+    def test_cookied_mcp_queens_dvr_ceph(self, underlay,
+                                         openstack_deployed,
+                                         tempest_actions):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+        if settings.RUN_TEMPEST:
+            tempest_actions.prepare_and_run_tempest()
+
+        LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensOvs(object):
+    """Test class for testing mcp queens ovs deploy"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_ovs
+    def test_cookied_mcp_queens_ovs(self, underlay,
+                                    openstack_deployed,
+                                    stacklight_deployed,
+                                    tempest_actions):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+        if settings.RUN_TEMPEST:
+            tempest_actions.prepare_and_run_tempest()
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_ovs
+    def test_cookied_mcp_queens_ovs_sl(self, underlay,
+                                       openstack_deployed,
+                                       stacklight_deployed):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run stacklight tests
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # Run SL component tests
+        stacklight_deployed.run_sl_functional_tests(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus',
+            'test_alerts.py')
+
+        # Download report
+        stacklight_deployed.download_sl_test_report(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/report.xml')
+        LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensDvr(object):
+    """Test class for testing mcp queens dvr deploy"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_dvr
+    def test_cookied_mcp_queens_dvr(self, underlay,
+                                    openstack_deployed,
+                                    stacklight_deployed,
+                                    tempest_actions):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+        if settings.RUN_TEMPEST:
+            tempest_actions.prepare_and_run_tempest()
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_dvr
+    def test_cookied_mcp_queens_dvr_sl(self, underlay,
+                                       openstack_deployed,
+                                       stacklight_deployed):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run stacklight tests
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # Run SL component tests
+        stacklight_deployed.run_sl_functional_tests(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus',
+            'test_alerts.py')
+
+        # Download report
+        stacklight_deployed.download_sl_test_report(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/report.xml')
+        LOG.info("*************** DONE **************")
+
+
+class TestMcpInstallQueensDvrSsl(object):
+    """Test class for testing mcp queens dvr deploy"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_dvr_ssl
+    def test_cookied_mcp_queens_dvr_ssl(self, underlay,
+                                        openstack_deployed,
+                                        stacklight_deployed,
+                                        tempest_actions):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+        if settings.RUN_TEMPEST:
+            tempest_actions.prepare_and_run_tempest()
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.cookied_mcp_queens_dvr_ssl
+    def test_cookied_mcp_queens_dvr_ssl_sl(self, underlay,
+                                           openstack_deployed,
+                                           stacklight_deployed):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run stacklight tests
+
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        # Run SL component tests
+        stacklight_deployed.run_sl_functional_tests(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus',
+            'test_alerts.py')
+
+        # Download report
+        stacklight_deployed.download_sl_test_report(
+            'cfg01',
+            '/root/stacklight-pytest/stacklight_tests/report.xml')
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
index ce56f7f..58ad9c7 100644
--- a/tcp_tests/tests/system/test_install_mcp_sl_os.py
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -69,28 +69,6 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    def test_mcp_os_newton_install(self, underlay, openstack_deployed,
-                                   openstack_actions):
-        """Test for deploying an mcp environment and check it
-        Scenario:
-        1. Prepare salt on hosts
-        2. Setup controller nodes
-        3. Setup compute nodes
-        4. Run tempest
-
-        """
-        openstack_actions._salt.local(
-            tgt='*', fun='cmd.run',
-            args='service ntp stop; ntpd -gq; service ntp start')
-
-        if settings.RUN_TEMPEST:
-            openstack_actions.run_tempest(pattern=settings.PATTERN,
-                                          conf_name='lvm_mcp_newton.conf')
-            openstack_actions.download_tempest_report()
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
     def test_mcp_sl_os_install(self, underlay, config, openstack_deployed,
                                stacklight_deployed, openstack_actions):
         """Test for deploying an mcp environment and check it
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 2fddde6..6467a8a 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -22,6 +22,7 @@
 from tcp_tests import settings
 
 from tcp_tests.managers.k8s import read_yaml_file
+from tcp_tests.managers.jenkins.client import JenkinsClient
 
 LOG = logger.logger
 
@@ -61,7 +62,7 @@
         svc = deployment.expose()
 
         show_step(4)
-        hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
+        hostname = "test.{0}.".format(settings.DOMAIN_NAME)
         svc.patch({
             "metadata": {
                 "annotations": {
@@ -94,7 +95,7 @@
         show_step(1)
         k8s_deployed.start_k8s_cncf_verification()
 
-    @pytest.mark.grap_versions
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
                               k8s_chain_update_log_helper):
@@ -141,8 +142,9 @@
         show_step(8)
         sample.delete()
 
-    @pytest.mark.grap_versions
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.k8s_metallb
     def test_k8s_metallb(self, show_step, config, k8s_deployed):
         """Enable metallb in cluster and do basic tests
 
@@ -157,7 +159,7 @@
             8. Delete deployments
         """
         show_step(1)
-        if not config.k8s_deploy.kubernetes_metallb_enabled:
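+        # Query the deployed cluster itself instead of the deploy-time config flag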
+        if not k8s_deployed.is_metallb_enabled:
             pytest.skip("Test requires metallb addon enabled")
 
         show_step(2)
@@ -195,7 +197,7 @@
         for sample in samples:
             sample.delete()
 
-    @pytest.mark.grap_versions
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     @pytest.mark.k8s_genie
     def test_k8s_genie_flannel(self, show_step, config,
@@ -222,7 +224,7 @@
         show_step(1)
 
         # Find out calico and flannel networks
-        tgt_k8s_control = "I@kubernetes:control:enabled:True"
+        tgt_k8s_control = "I@kubernetes:master"
 
         flannel_pillar = salt_deployed.get_pillar(
             tgt=tgt_k8s_control,
@@ -314,8 +316,9 @@
         multicni_pod.delete()
         nocni_pod.delete()
 
-    @pytest.mark.grap_versions
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.k8s_dashboard
     def test_k8s_dashboard(self, show_step, config,
                            salt_deployed, k8s_deployed):
         """Test dashboard setup
@@ -389,8 +392,9 @@
         for namespace in dashboard_namespaces:
             assert namespace['objectMeta']['name'] in namespaces_names_list
 
-    @pytest.mark.grap_versions
+    @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.k8s_ingress_nginx
     def test_k8s_ingress_nginx(self, show_step, config,
                                salt_deployed, k8s_deployed):
         """Test ingress-nginx configured and working with metallb
@@ -405,9 +409,9 @@
             6. Try to reach test1 and test2 deployment services endpoints
         """
         show_step(1)
-        if not config.k8s_deploy.kubernetes_metallb_enabled:
+        if not k8s_deployed.is_metallb_enabled:
             pytest.skip("Test requires metallb addon enabled")
-        if not config.k8s_deploy.kubernetes_ingressnginx_enabled:
+        if not k8s_deployed.is_ingress_nginx_enabled:
             pytest.skip("Test requires ingress-nginx addon enabled")
 
         show_step(2)
@@ -459,3 +463,41 @@
         req2 = requests.get(ingress_address + "/test2", verify=False)
         assert req2.status_code == 200
         assert 'dep-ingress-2' in req2.text
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_k8s_cicd_upgrade(self, show_step, config,
+                              salt_deployed, k8s_deployed):
+        """Test k8s upgrade cicd pipeline
+
+        Scenario:
+            1. Setup Kubernetes+CICD cluster
+            2. Start deploy-k8s-upgrade job in jenkins
+            3. Wait for job to end
+        """
+        show_step(1)
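+        # Jenkins endpoint and credentials are taken from the cid* node pillar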
+        jenkins_info = salt_deployed.get_pillar(
+            tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]
+
+        salt_api = salt_deployed.get_pillar(
+            tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]
+
+        show_step(2)
+        jenkins = JenkinsClient(
+            host='http://{host}:{port}'.format(**jenkins_info),
+            username=jenkins_info['username'],
+            password=jenkins_info['password'])
+
+        params = jenkins.make_defults_params('deploy-k8s-upgrade')
+        params['SALT_MASTER_URL'] = salt_api
+        params['SALT_MASTER_CREDENTIALS'] = 'salt'
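+        # Run the conformance suite both before and after the upgrade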
+        params['CONFORMANCE_RUN_AFTER'] = True
+        params['CONFORMANCE_RUN_BEFORE'] = True
+        build = jenkins.run_build('deploy-k8s-upgrade', params)
+
+        show_step(3)
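+        # The upgrade pipeline can run for a long time; wait up to 4 hours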
+        jenkins.wait_end_of_build(
+            name=build[0], build_id=build[1], timeout=3600 * 4)
+        result = jenkins.build_info(
+            name=build[0], build_id=build[1])['result']
+        assert result == 'SUCCESS', "k8s upgrade job has failed"
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 05a8deb..6c083cb 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -430,13 +430,7 @@
             cmd='salt "*" ssh.set_auth_key ubuntu '
                 '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
 
-        salt_nodes = salt_deployed.get_ssh_data()
-        nodes_list = \
-            [node for node in salt_nodes
-             if not any(node['node_name'] == n['node_name']
-                        for n in config.underlay.ssh)]
-        config.underlay.ssh = config.underlay.ssh + nodes_list
-        underlay.add_config_ssh(nodes_list)
+        salt_deployed.update_ssh_data_from_minions()
 
         time.sleep(120)  # debug sleep
         cmd = "salt '*' test.ping"
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 83fd33a..d3b6c27 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -25,6 +25,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.k8s_virtlet
     def test_virtlet_create_delete_vm(self, show_step, config, k8s_deployed):
         """Test for deploying an mcp environment with virtlet
 
@@ -51,6 +52,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.k8s_virtlet
     def test_vm_resource_quotas(self, show_step, config, k8s_deployed):
         """Test for deploying a VM with specific quotas
 
diff --git a/tcp_tests/utils/get_logs.py b/tcp_tests/utils/get_logs.py
new file mode 100755
index 0000000..225f9d7
--- /dev/null
+++ b/tcp_tests/utils/get_logs.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+try:
+    from tcp_tests.fixtures import config_fixtures
+    from tcp_tests.managers import underlay_ssh_manager
+except ImportError:
+    print("ImportError: Run the application from the tcp-qa directory or "
+          "set the PYTHONPATH environment variable to directory which contains"
+          " ./tcp_tests")
+    sys.exit(1)
+
+
+def load_params():
+    """
+    Build the CLI argument parser
+
+    Returns: ArgumentParser instance
+    """
+    parser = argparse.ArgumentParser(description=(
+        'Download logs and debug info from salt minions'
+    ))
+    default_name_prefix = 'logs_' + time.strftime("%Y%m%d_%H%M%S")
+    parser.add_argument('--archive-name-prefix',
+                        help=('Custom prefix for the archive name'),
+                        default=default_name_prefix,
+                        type=str)
+    return parser
+
+
+def main():
+    parser = load_params()
+    opts = parser.parse_args()
+
+    tests_configs = os.environ.get('TESTS_CONFIGS', None)
+    if not tests_configs or not os.path.isfile(tests_configs):
+        print("Download logs and debug info from salt minions. "
+              "Please set TESTS_CONFIGS environment variable whith"
+              "the path to INI file with lab metadata.")
+        return 11
+
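+    # Build the tcp-qa config from the TESTS_CONFIGS ini and collect the logs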
+    config = config_fixtures.config()
+    underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+    underlay.get_logs(opts.archive_name_prefix)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index b01f366..acc2e9f 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -4,7 +4,6 @@
 import os
 import sys
 
-from devops import error
 import json
 
 sys.path.append(os.getcwd())
@@ -140,7 +139,7 @@
             interval=1,
             verbose=opts.verbose,
             job_output_prefix=opts.job_output_prefix)
-    except error.TimeoutError as e:
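+    # Catch broadly: the devops-specific TimeoutError import was removed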
+    except Exception as e:
         print(str(e))
         raise