Merge "Minor fixes in k8s tests"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 27dbfd7..3e96c84 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -24,7 +24,7 @@
             shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
         }
 
-        stage("Install core infrastructure and deploy CICD nodes") {
+        stage("Deploy platform components") {
             // steps: env.PLATFORM_STACK_INSTALL
             shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
         }
@@ -40,8 +40,6 @@
         if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
             shared.run_cmd("""\
                 dos.py resume ${ENV_NAME} || true
-                sleep 20    # Wait for I/O on the host calms down
-                dos.py time-sync ${ENV_NAME} || true
             """)
         } else {
             shared.run_cmd("""\
@@ -72,8 +70,6 @@
         if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
             shared.run_cmd("""\
                 dos.py resume ${ENV_NAME} || true
-                sleep 20    # Wait for I/O on the host calms down
-                dos.py time-sync ${ENV_NAME} || true
             """)
         } else {
             shared.run_cmd("""\
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index f4c8765..ce32c24 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -50,6 +50,12 @@
                 """)
             }
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             stage("Create an environment ${ENV_NAME} in disabled state") {
                 // deploy_hardware.xml
                 shared.run_cmd("""\
@@ -87,7 +93,7 @@
                     export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
                     export MANAGER=devops
                     export SHUTDOWN_ENV_ON_TEARDOWN=false
-                    export BOOTSTRAP_TIMEOUT=900
+                    export BOOTSTRAP_TIMEOUT=1200
                     export PYTHONIOENCODING=UTF-8
                     export REPOSITORY_SUITE=${MCP_VERSION}
                     export TEST_GROUP=test_bootstrap_salt
@@ -97,7 +103,7 @@
             }
 
           } catch (e) {
-              common.printMsg("Job is failed: " + e.message, "red")
+              common.printMsg("Job failed", "red")
               throw e
           } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index d067e07..538f5ea 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -35,6 +35,12 @@
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
             }
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             // Install core and cicd
             def stack
             def timeout
@@ -60,7 +66,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed: " + e.message, "red")
+            common.printMsg("Job failed", "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 54bc43d..78e363f 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -35,6 +35,12 @@
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
             }
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             // Install the cluster
             def stack
             def timeout
@@ -60,7 +66,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed:" + e.message, "red")
+            common.printMsg("Job failed", "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 52dd25d..5d7bd8d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -36,9 +36,16 @@
     dir("${PARENT_WORKSPACE}") {
         try {
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             stage("Run tests") {
                 def steps = shared.get_steps_list(PASSED_STEPS)
                 def sources = """\
+                    cd ${PARENT_WORKSPACE}
                     export ENV_NAME=${ENV_NAME}
                     . ./tcp_tests/utils/env_salt"""
                 if (steps.contains('k8s')) {
@@ -52,7 +59,8 @@
                 def installed = steps.collect {"""\
                     export ${it}_installed=true"""}.join("\n")
 
-                shared.run_cmd(sources + installed + """
+                shared.run_sh(sources + installed + """
+                    export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
                     export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
                     export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
                     export salt_master_port=6969
@@ -67,7 +75,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed" + e.message, "red")
+            common.printMsg("Job failed", "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 7d2ce53..f31db14 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -31,66 +31,111 @@
     }
     dir("${PARENT_WORKSPACE}") {
         try {
+
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             def report_name = ''
             def testSuiteName = ''
             def methodname = ''
             def testrail_name_template = ''
             def reporter_extra_options = []
 
-            //stage("Archive all xml reports") {
-            //    archiveArtifacts artifacts: "${PARENT_WORKSPACE}/*.xml"
-            //}
-
-            stage("Deployment report") {
-                report_name = "deployment_${ENV_NAME}.xml"
-                testSuiteName = "[MCP] Integration automation"
-                methodname = '{methodname}'
-                testrail_name_template = '{title}'
-                reporter_extra_options = [
-                  "--testrail-add-missing-cases",
-                  "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
-                  "--testrail-case-section-name \'All\'",
-                ]
-                shared.upload_results_to_testrail(report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+            stage("Archive all xml reports") {
+                archiveArtifacts artifacts: "**/*.xml"
             }
 
-            stage("tcp-qa cases report") {
-                report_name = "nosetests.xml"
-                testSuiteName = "[MCP_X] integration cases"
-                methodname = "{methodname}"
-                testrail_name_template = "{title}"
-                shared.upload_results_to_testrail(report_name, testSuiteName, methodname, testrail_name_template)
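+            // Locate the XML reports produced by the child jobs anywhere under the parent workspace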
+            def deployment_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"deployment_${ENV_NAME}.xml\"", returnStdout: true)
+            def tcpqa_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"nosetests.xml\"", returnStdout: true)
+            def tempest_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"report_*.xml\"", returnStdout: true)
+            def k8s_conformance_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"conformance_result.xml\"", returnStdout: true)
+            def stacklight_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"stacklight_report.xml\"", returnStdout: true)
+            common.printMsg(deployment_report_name ? "Found deployment report: ${deployment_report_name}" : "Deployment report not found", deployment_report_name ? "blue" : "red")
+            common.printMsg(tcpqa_report_name ? "Found tcp-qa report: ${tcpqa_report_name}" : "tcp-qa report not found", tcpqa_report_name ? "blue" : "red")
+            common.printMsg(tempest_report_name ? "Found tempest report: ${tempest_report_name}" : "tempest report not found", tempest_report_name ? "blue" : "red")
+            common.printMsg(k8s_conformance_report_name ? "Found k8s conformance report: ${k8s_conformance_report_name}" : "k8s conformance report not found", k8s_conformance_report_name ? "blue" : "red")
+            common.printMsg(stacklight_report_name ? "Found stacklight-pytest report: ${stacklight_report_name}" : "stacklight-pytest report not found", stacklight_report_name ? "blue" : "red")
+
+
+            if (deployment_report_name) {
+                stage("Deployment report") {
+//                    report_name = "deployment_${ENV_NAME}.xml"
+                    testSuiteName = "[MCP] Integration automation"
+                    methodname = '{methodname}'
+                    testrail_name_template = '{title}'
+                    reporter_extra_options = [
+                      "--testrail-add-missing-cases",
+                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+                      "--testrail-case-section-name \'All\'",
+                    ]
+                    shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                }
             }
 
-            if ('openstack' in stacks) {
+            if (tcpqa_report_name) {
+                stage("tcp-qa cases report") {
+                    // tcpqa_report_name =~ "nosetests.xml"
+                    testSuiteName = "[MCP_X] integration cases"
+                    methodname = "{methodname}"
+                    testrail_name_template = "{title}"
+                    reporter_extra_options = [
+                      "--testrail-add-missing-cases",
+                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+                      "--testrail-case-section-name \'All\'",
+                    ]
+                    shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                }
+            }
+
+            if ('openstack' in stacks && tempest_report_name) {
                 stage("Tempest report") {
-                    report_name = "report_*.xml"
+                    // tempest_report_name =~ "report_*.xml"
                     testSuiteName = "[MCP1.1_PIKE]Tempest"
                     methodname = "{classname}.{methodname}"
                     testrail_name_template = "{title}"
-                    shared.upload_results_to_testrail(report_name, testSuiteName, methodname, testrail_name_template)
+                    shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
                 }
             }
 
-            if ('k8s' in stacks) {
-                stage("Tempest report") {
-                    println "TBD"
-                    // K8s conformance report
+            if ('k8s' in stacks && k8s_conformance_report_name) {
+                stage("K8s conformance report") {
+                    // k8s_conformance_report_name =~ conformance_result.xml
+                    // TODO(ddmitriev): it's better to get the k8s version right after deployment
+                    // and store in some artifact that can be re-used here.
+                    def k8s_version = shared.run_cmd_stdout("""\
+                        export ENV_NAME=${ENV_NAME}
+                        . ./tcp_tests/utils/env_salt
+                        . ./tcp_tests/utils/env_k8s
+                        echo "\$kubernetes_version_major.\$kubernetes_version_minor"
+                    """).trim().split().last()
+                    testSuiteName = "[MCP][k8s]Hyperkube ${k8s_version}.x"
+                    methodname = "{methodname}"
+                    testrail_name_template = "{title}"
+                    reporter_extra_options = [
+                      "--send-duplicates",
+                      "--testrail-add-missing-cases",
+                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+                      "--testrail-case-section-name \'Conformance\'",
+                    ]
+                    shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
                 }
             }
 
-            if ('stacklight' in stacks) {
+            if ('stacklight' in stacks && stacklight_report_name) {
                 stage("stacklight-pytest report") {
-                    report_name = "report.xml"
+                    // stacklight_report_name =~ "stacklight_report.xml"
                     testSuiteName = "LMA2.0_Automated"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
-                    shared.upload_results_to_testrail(report_name, testSuiteName, methodname, testrail_name_template)
+                    shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
                 }
             }
 
         } catch (e) {
-            common.printMsg("Job is failed: " + e.message, "red")
+            common.printMsg("Job failed", "red")
             throw e
         } finally {
             // reporting is failed for some reason
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 775b901..108ee0c 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -2,6 +2,19 @@
 
 import groovy.xml.XmlUtil
 
+def run_sh(String cmd) {
+    // run shell script without capturing the output
+    def common = new com.mirantis.mk.Common()
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = """\
+        set -ex;
+        . ${VENV_PATH}/bin/activate;
+        bash -c '${cmd.stripIndent()}'
+    """
+    return sh(script: script)
+}
+
 def run_cmd(String cmd, Boolean returnStdout=false) {
     def common = new com.mirantis.mk.Common()
     common.printMsg("Run shell command:\n" + cmd, "blue")
@@ -115,8 +128,26 @@
         """)
 }
 
+def update_working_dir() {
+        // Fetch a patchset from Gerrit into the working dir and update the requirements
+        run_cmd("""\
+            if [ -n "$TCP_QA_REFS" ]; then
+                set -e
+                git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+            fi
+            pip install -r tcp_tests/requirements.txt
+        """)
+}
+
 def swarm_bootstrap_salt_cluster_devops() {
         def common = new com.mirantis.mk.Common()
+        def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
+        def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: env.MCP_VERSION
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
+        def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
+        def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
+        def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
         def parameters = [
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
@@ -126,11 +157,13 @@
                 string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
                 string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
                 string(name: 'CFG01_CONFIG_IMAGE_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
-                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
-                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
-                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
-                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${COOKIECUTTER_TEMPLATE_COMMIT}"),
-                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${SALT_MODELS_SYSTEM_COMMIT}"),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
+                string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecutter_template_commit}"),
+                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${salt_models_system_commit}"),
+                string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
+                string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
 
@@ -140,12 +173,13 @@
 def swarm_deploy_cicd(String stack_to_install='core,cicd') {
         // Run openstack_deploy job on cfg01 Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def parameters = [
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'STACK_INSTALL', value: stack_to_install),
-                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
         build_pipeline_job('swarm-deploy-cicd', parameters)
@@ -154,12 +188,13 @@
 def swarm_deploy_platform(String stack_to_install) {
         // Run openstack_deploy job on CICD Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def parameters = [
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'STACK_INSTALL', value: stack_to_install),
-                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
         build_pipeline_job('swarm-deploy-platform', parameters)
@@ -168,13 +203,14 @@
 def swarm_run_pytest(String passed_steps) {
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def parameters = [
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'PASSED_STEPS', value: passed_steps),
                 string(name: 'RUN_TEST_OPTS', value: "${RUN_TEST_OPTS}"),
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
-                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
                 string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
                 string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
@@ -190,13 +226,14 @@
 def swarm_testrail_report(String passed_steps) {
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def parameters = [
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
                 string(name: 'PASSED_STEPS', value: passed_steps),
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
-                string(name: 'TCP_QA_REFS', value: "${TCP_QA_REFS}"),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
             ]
         common.printMsg("Start building job 'swarm-testrail-report' with parameters:", "purple")
         common.prettyPrint(parameters)
@@ -218,6 +255,9 @@
 
         def cookiecuttertemplate_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
         def saltmodels_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: env.MCP_VERSION
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
+        def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
 
         def parameters = [
                 string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
@@ -226,7 +266,9 @@
                 string(name: 'REPOSITORY_SUITE', value: "${env.MCP_VERSION}"),
                 string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${saltmodels_system_commit}"),
                 string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecuttertemplate_commit}"),
-                string(name: 'TCP_QA_REVIEW', value: "${TCP_QA_REFS}"),
+                string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
+                string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
+                string(name: 'TCP_QA_REVIEW', value: "${tcp_qa_refs}"),
                 string(name: 'IPV4_NET_ADMIN', value: IPV4_NET_ADMIN),
                 string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
                 string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
@@ -244,6 +286,10 @@
             echo \$SALT_MASTER_IP
             """).trim().split().last()
         println("SALT_MASTER_IP=" + SALT_MASTER_IP)
+
+        def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
+        def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
+
         def parameters = [
                 string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
                 string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
@@ -258,8 +304,8 @@
                 booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
                 string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
                 string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
-                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
-                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
+                string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
             ]
         build_pipeline_job('create-cfg-config-drive', parameters)
 }
@@ -347,11 +393,15 @@
     // then make a copy for the created snapshot to allow the system
     // tests to revert this snapshot along with the metadata from the INI file.
     run_cmd("""\
+        set -ex
         dos.py suspend ${ENV_NAME}
         dos.py snapshot ${ENV_NAME} ${stack}_deployed
         dos.py resume ${ENV_NAME}
         sleep 20    # Wait for I/O on the host calms down
-        dos.py time-sync ${ENV_NAME} || dos.py time-sync ${ENV_NAME} # sometimes, timesync may fail. Need to update it in fuel-devops.
+
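+        # Synchronize time on all nodes except the cfg01 node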
+        CFG01_NAME=\$(dos.py show-resources ${ENV_NAME} | grep ^cfg01 | cut -d" " -f1)
+        dos.py time-sync ${ENV_NAME} --skip-sync \${CFG01_NAME}
+
         if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
             cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
         fi
@@ -408,10 +458,7 @@
   def script = """
     . ${venvPath}/bin/activate
     set -ex
-    report_xml=\$(find ${PARENT_WORKSPACE} -name "${report_name}")
-    if [ -n "\${report_xml}" ]; then
-        report ${reporterOptions.join(' ')} \${report_xml}
-    fi
+    report ${reporterOptions.join(' ')} ${report_name}
   """
 
   def testrail_cred_id = params.TESTRAIL_CRED ?: 'testrail_system_tests'
diff --git a/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml b/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml
index 0fe1e7e..67fb53c 100644
--- a/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml
+++ b/tcp_tests/environment/lab03_ovs_dpdk/nodes.yml
@@ -22,7 +22,7 @@
                 proto: manual
                 slaves: enp2s0f1
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - enp2s0f1
                 require_interfaces:
                 - enp2s0f1
@@ -30,7 +30,7 @@
                 enabled: true
                 proto: manual
                 type: vlan
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0
                 require_interfaces:
                 - bond0
@@ -40,7 +40,7 @@
                 netmask: 255.255.255.0
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0.2416
                 require_interfaces:
                 - bond0.2416
@@ -54,7 +54,7 @@
                 netmask: 255.255.255.192
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - enp2s0f0
                 require_interfaces:
                 - enp2s0f0
@@ -89,7 +89,7 @@
                 proto: manual
                 slaves: enp2s0f1
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - enp2s0f1
                 require_interfaces:
                 - enp2s0f1
@@ -97,7 +97,7 @@
                 enabled: true
                 proto: manual
                 type: vlan
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0
                 require_interfaces:
                 - bond0
@@ -107,7 +107,7 @@
                 netmask: 255.255.255.0
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0.2416
                 require_interfaces:
                 - bond0.2416
@@ -121,7 +121,7 @@
                 netmask: 255.255.255.192
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - enp2s0f0
                 require_interfaces:
                 - enp2s0f0
@@ -156,7 +156,7 @@
                 proto: manual
                 slaves: eno2
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - eno2
                 require_interfaces:
                 - eno2
@@ -164,7 +164,7 @@
                 enabled: true
                 proto: manual
                 type: vlan
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0
                 require_interfaces:
                 - bond0
@@ -174,7 +174,7 @@
                 netmask: 255.255.255.0
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0.2416
                 require_interfaces:
                 - bond0.2416
@@ -188,7 +188,7 @@
                 netmask: 255.255.255.192
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - eno1
                 require_interfaces:
                 - eno1
@@ -263,7 +263,7 @@
                 proto: manual
                 slaves: enp3s0f1 enp5s0f0
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - enp3s0f1
                 - enp5s0f0
                 require_interfaces:
@@ -273,7 +273,7 @@
                 enabled: true
                 proto: manual
                 type: vlan
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0
                 require_interfaces:
                 - bond0
@@ -283,7 +283,7 @@
                 netmask: 255.255.255.0
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0.2416
                 require_interfaces:
                 - bond0.2416
@@ -321,7 +321,7 @@
 #                slaves: eth0 eth3
                 slaves: eth0
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - eth0
 #                - eth3
                 require_interfaces:
@@ -331,7 +331,7 @@
                 enabled: true
                 proto: manual
                 type: vlan
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0
                 require_interfaces:
                 - bond0
@@ -346,7 +346,7 @@
                 netmask: 255.255.255.0
                 proto: static
                 type: bridge
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - bond0.2416
                 require_interfaces:
                 - bond0.2416
@@ -397,7 +397,7 @@
                 proto: manual
                 slaves: enp3s0f1
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - enp3s0f1
                 require_interfaces:
                 - enp3s0f1
@@ -483,7 +483,7 @@
                 proto: manual
                 slaves: eno2
                 type: bond
-                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.net/#/c/8344
+                use_interfaces: # Remove 'use_interfaces' after https://gerrit.mcp.mirantis.com/#/c/8344
                 - eno2
                 require_interfaces:
                 - eno2
diff --git a/tcp_tests/fixtures/ceph_fixtures.py b/tcp_tests/fixtures/ceph_fixtures.py
index 44ab7d2..a06965b 100644
--- a/tcp_tests/fixtures/ceph_fixtures.py
+++ b/tcp_tests/fixtures/ceph_fixtures.py
@@ -22,7 +22,7 @@
 
 
 @pytest.fixture(scope='function')
-def ceph_actions(config, underlay, salt_deployed):
+def ceph_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for OpenStack
 
     :param config: fixture provides oslo.config
@@ -32,7 +32,7 @@
 
     For use in tests or fixtures to deploy a custom OpenStack
     """
-    return ceph_manager.CephManager(config, underlay, salt_deployed)
+    return ceph_manager.CephManager(config, underlay_actions, salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.ceph_deployed)
diff --git a/tcp_tests/fixtures/core_fixtures.py b/tcp_tests/fixtures/core_fixtures.py
index cb058a3..98f88c2 100644
--- a/tcp_tests/fixtures/core_fixtures.py
+++ b/tcp_tests/fixtures/core_fixtures.py
@@ -22,14 +22,14 @@
 
 
 @pytest.fixture(scope='function')
-def core_actions(config, underlay, salt_actions):
+def core_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for Core
 
     :param config: fixture provides oslo.config
     :param underlay: fixture provides underlay manager
     :rtype: CoreManager
     """
-    return core_manager.CoreManager(config, underlay, salt_actions)
+    return core_manager.CoreManager(config, underlay_actions, salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.core_deployed)
diff --git a/tcp_tests/fixtures/decapod_fixtures.py b/tcp_tests/fixtures/decapod_fixtures.py
index 8e40b41..0ef8869 100644
--- a/tcp_tests/fixtures/decapod_fixtures.py
+++ b/tcp_tests/fixtures/decapod_fixtures.py
@@ -22,14 +22,15 @@
 
 
 @pytest.fixture(scope='function')
-def decapod_actions(config, underlay, salt_actions):
+def decapod_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for Decapod
 
     :param config: fixture provides oslo.config
     :param underlay: fixture provides underlay manager
     :rtype: DecapodManager
     """
-    return decapod_manager.DecapodManager(config, underlay, salt_actions)
+    return decapod_manager.DecapodManager(config, underlay_actions,
+                                          salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.decapod_deployed)
diff --git a/tcp_tests/fixtures/drivetrain_fixtures.py b/tcp_tests/fixtures/drivetrain_fixtures.py
index e0e709b..0e8cbed 100644
--- a/tcp_tests/fixtures/drivetrain_fixtures.py
+++ b/tcp_tests/fixtures/drivetrain_fixtures.py
@@ -22,14 +22,15 @@
 
 
 @pytest.fixture(scope='function')
-def drivetrain_actions(config, underlay, salt_actions):
+def drivetrain_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for Drivetrain
 
     :param config: fixture provides oslo.config
     :param underlay: fixture provides underlay manager
     :rtype: DrivetrainManager
     """
-    return drivetrain_manager.DrivetrainManager(config, underlay, salt_actions)
+    return drivetrain_manager.DrivetrainManager(config, underlay_actions,
+                                                salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.drivetrain_deployed)
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index fee64cf..e581b86 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -24,7 +24,7 @@
 
 
 @pytest.fixture(scope='function')
-def k8s_actions(config, underlay, salt_deployed):
+def k8s_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for K8S
 
     :param config: fixture provides oslo.config
@@ -34,7 +34,7 @@
 
     For use in tests or fixtures to deploy a custom K8S
     """
-    return k8smanager.K8SManager(config, underlay, salt_deployed)
+    return k8smanager.K8SManager(config, underlay_actions, salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)
@@ -107,7 +107,7 @@
 
 
 @pytest.fixture(scope='function')
-def k8s_logs(request, func_name, underlay, k8s_deployed):
+def k8s_logs(request, func_name, k8s_actions):
     """Finalizer to extract conformance logs
 
     Usage:
@@ -156,16 +156,16 @@
                 files_to_extract = utils.extract_name_from_mark(
                     extract, 'files_to_extract')
                 for path in files_to_extract:
-                    k8s_deployed.extract_file_to_node(
+                    k8s_actions.extract_file_to_node(
                         system=container_system, container=extract_from,
                         file_path=path)
             else:
-                k8s_deployed.extract_file_to_node()
+                k8s_actions.extract_file_to_node()
             if merge_xunit:
                 path = utils.extract_name_from_mark(merge_xunit, 'path')
                 output = utils.extract_name_from_mark(merge_xunit, 'output')
-                k8s_deployed.combine_xunit(path, output)
-            k8s_deployed.download_k8s_logs(files)
+                k8s_actions.combine_xunit(path, output)
+            k8s_actions.download_k8s_logs(files)
 
     request.addfinalizer(test_fin)
 
diff --git a/tcp_tests/fixtures/openstack_fixtures.py b/tcp_tests/fixtures/openstack_fixtures.py
index 480a548..e3f8780 100644
--- a/tcp_tests/fixtures/openstack_fixtures.py
+++ b/tcp_tests/fixtures/openstack_fixtures.py
@@ -22,7 +22,7 @@
 
 
 @pytest.fixture(scope='function')
-def openstack_actions(config, underlay, salt_deployed):
+def openstack_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for OpenStack
 
     :param config: fixture provides oslo.config
@@ -33,7 +33,8 @@
 
     For use in tests or fixtures to deploy a custom OpenStack
     """
-    return openstack_manager.OpenstackManager(config, underlay, salt_deployed)
+    return openstack_manager.OpenstackManager(config, underlay_actions,
+                                              salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
diff --git a/tcp_tests/fixtures/oss_fixtures.py b/tcp_tests/fixtures/oss_fixtures.py
index 6162e27..3ff12f1 100644
--- a/tcp_tests/fixtures/oss_fixtures.py
+++ b/tcp_tests/fixtures/oss_fixtures.py
@@ -22,14 +22,14 @@
 
 
 @pytest.fixture(scope='function')
-def oss_actions(config, underlay, salt_actions):
+def oss_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for OSS
 
     :param config: fixture provides oslo.config
     :param underlay: fixture provides underlay manager
     :rtype: OSSManager
     """
-    return oss_manager.OSSManager(config, underlay, salt_actions)
+    return oss_manager.OSSManager(config, underlay_actions, salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.oss_deployed)
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
index ddeced2..5a43b42 100644
--- a/tcp_tests/fixtures/runtest_fixtures.py
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -18,7 +18,7 @@
 
 
 @pytest.fixture(scope='function')
-def tempest_actions(underlay, salt_actions):
+def tempest_actions(underlay_actions, salt_actions):
     """
     Run tempest tests
     """
@@ -28,7 +28,7 @@
     domain_name = settings.DOMAIN_NAME
     target = settings.TEMPEST_TARGET
     runtest = RuntestManager(
-        underlay, salt_actions,
+        underlay_actions, salt_actions,
         cluster_name=cluster_name,
         domain_name=domain_name,
         tempest_threads=tempest_threads,
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 469b965..7f4ce60 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -22,14 +22,14 @@
 
 
 @pytest.fixture(scope='function')
-def salt_actions(config, underlay):
+def salt_actions(config, underlay_actions):
     """Fixture that provides various actions for salt
 
     :param config: fixture provides oslo.config
     :param underlay: fixture provides underlay manager
     :rtype: SaltManager
     """
-    return saltmanager.SaltManager(config, underlay)
+    return saltmanager.SaltManager(config, underlay_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.salt_deployed)
diff --git a/tcp_tests/fixtures/stacklight_fixtures.py b/tcp_tests/fixtures/stacklight_fixtures.py
index df0d516..4340847 100644
--- a/tcp_tests/fixtures/stacklight_fixtures.py
+++ b/tcp_tests/fixtures/stacklight_fixtures.py
@@ -23,7 +23,7 @@
 
 
 @pytest.fixture(scope='function')
-def sl_actions(config, underlay, salt_deployed):
+def sl_actions(config, underlay_actions, salt_actions):
     """Fixture that provides various actions for K8S
 
     :param config: fixture provides oslo.config
@@ -32,14 +32,14 @@
 
     For use in tests or fixtures to deploy a custom K8S
     """
-    return sl_manager.SLManager(config, underlay, salt_deployed)
+    return sl_manager.SLManager(config, underlay_actions, salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.stacklight_deployed)
 @pytest.fixture(scope='function')
 def stacklight_deployed(revert_snapshot, request, config,
-                        hardware, underlay, core_deployed,
-                        salt_deployed, sl_actions):
+                        hardware, underlay, salt_deployed,
+                        sl_actions, core_deployed):
     """Fixture to get or install SL services on environment
 
     :param revert_snapshot: fixture that reverts snapshot that is specified
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 182e3f9..65677a9 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -167,7 +167,7 @@
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.underlay)
 @pytest.fixture(scope="function")
-def underlay(request, revert_snapshot, config, hardware):
+def underlay(request, revert_snapshot, config, hardware, underlay_actions):
     """Fixture that bootstraps the environment underlay.
 
     - Starts the 'hardware' environment and creates 'underlay' with required
@@ -198,15 +198,15 @@
             roles=config.underlay.roles)
 
         LOG.info("Config - {}".format(config))
-        underlay = underlay_actions(config)
+        underlay_actions.add_config_ssh(config.underlay.ssh)
 
         if not config.underlay.lvm:
-            underlay.enable_lvm(hardware.lvm_storages())
-            config.underlay.lvm = underlay.config_lvm
+            underlay_actions.enable_lvm(hardware.lvm_storages())
+            config.underlay.lvm = underlay_actions.config_lvm
 
         hardware.create_snapshot(ext.SNAPSHOT.underlay)
 
-        return underlay
+        return underlay_actions
 
     def day1_underlay():
         hardware.start(
@@ -215,8 +215,7 @@
 
         config.underlay.ssh = hardware.get_ssh_data(
             roles=config.underlay.roles)
-
-        underlay = underlay_actions(config)
+        underlay_actions.add_config_ssh(config.underlay.ssh)
 
         LOG.info("Generate MACs for MaaS")
         macs = {
@@ -236,8 +235,8 @@
                         "machines": macs}}}}
 
         if not config.underlay.lvm:
-            underlay.enable_lvm(hardware.lvm_storages())
-            config.underlay.lvm = underlay.config_lvm
+            underlay_actions.enable_lvm(hardware.lvm_storages())
+            config.underlay.lvm = underlay_actions.config_lvm
 
         for node in hardware.slave_nodes:
             # For correct comissioning by MaaS nodes should be powered off
@@ -245,21 +244,18 @@
 
         hardware.create_snapshot(ext.SNAPSHOT.underlay)
 
-        return underlay
+        return underlay_actions
 
     if not config.underlay.ssh:
         if request.node.get_marker('day1_underlay'):
-            underlay = day1_underlay()
+            return day1_underlay()
         else:
-            underlay = basic_underlay()
-
+            return basic_underlay()
     else:
         # 1. hardware environment created and powered on
         # 2. config.underlay.ssh contains SSH access to provisioned nodes
         #    (can be passed from external config with TESTS_CONFIGS variable)
-        underlay = underlay_actions(config)
-
-    return underlay
+        return underlay_actions
 
 
 @pytest.fixture(scope='function')
diff --git a/tcp_tests/managers/k8s/cluster.py b/tcp_tests/managers/k8s/cluster.py
index 8ffb4d1..db7bb18 100644
--- a/tcp_tests/managers/k8s/cluster.py
+++ b/tcp_tests/managers/k8s/cluster.py
@@ -42,6 +42,8 @@
 from tcp_tests.managers.k8s.services import K8sServiceManager
 from tcp_tests.managers.k8s.replicasets import K8sReplicaSetManager
 from tcp_tests.managers.k8s.networkpolicies import K8sNetworkPolicyManager
+from tcp_tests.managers.k8s.clusterrolebindings import \
+    K8sClusterRoleBindingManager
 
 
 class K8sCluster(object):
@@ -89,6 +91,7 @@
         self.api_extensions = client.ExtensionsV1beta1Api(api_client)
         self.api_autoscaling = client.AutoscalingV1Api(api_client)
         self.api_batch = client.BatchV1Api(api_client)
+        self.api_rbac_auth = client.RbacAuthorizationV1Api(api_client)
 
         self.nodes = K8sNodeManager(self)
         self.pods = K8sPodManager(self)
@@ -111,3 +114,4 @@
         self.pvolumes = K8sPersistentVolumeManager(self)
         self.replicasets = K8sReplicaSetManager(self)
         self.networkpolicies = K8sNetworkPolicyManager(self)
+        self.clusterrolebindings = K8sClusterRoleBindingManager(self)
diff --git a/tcp_tests/managers/k8s/clusterrolebindings.py b/tcp_tests/managers/k8s/clusterrolebindings.py
new file mode 100644
index 0000000..d958817
--- /dev/null
+++ b/tcp_tests/managers/k8s/clusterrolebindings.py
@@ -0,0 +1,54 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+
+
+from kubernetes import client
+
+from tcp_tests.managers.k8s.base import K8sBaseResource
+from tcp_tests.managers.k8s.base import K8sBaseManager
+
+
+class K8sClusterRoleBinding(K8sBaseResource):
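+    # Single ClusterRoleBinding resource; all CRUD calls go through the RbacAuthorizationV1 API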
+    resource_type = 'clusterrolebindings'
+
+    def _read(self, **kwargs):
+        return self._manager.api.read_cluster_role_binding(self.name, **kwargs)
+
+    def _create(self, body, **kwargs):
+        return self._manager.api.create_cluster_role_binding(body, **kwargs)
+
+    def _patch(self, body, **kwargs):
+        return self._manager.api.patch_cluster_role_binding(
+            self.name, body, **kwargs)
+
+    def _replace(self, body, **kwargs):
+        return self._manager.api.replace_cluster_role_binding(
+            self.name, body, **kwargs)
+
+    def _delete(self, **kwargs):
+        self._manager.api.delete_cluster_role_binding(
+            self.name, client.V1DeleteOptions(), **kwargs)
+
+
+class K8sClusterRoleBindingManager(K8sBaseManager):
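+    # Collection-level operations; cluster role bindings are cluster-scoped, so the namespace is ignored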
+    resource_class = K8sClusterRoleBinding
+
+    @property
+    def api(self):
+        return self._cluster.api_rbac_auth
+
+    def _list(self, namespace, **kwargs):
+        return self.api.list_cluster_role_binding(**kwargs)
+
+    def _list_all(self, **kwargs):
+        return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/ingresses.py b/tcp_tests/managers/k8s/ingresses.py
index 906dc31..5dd353c 100644
--- a/tcp_tests/managers/k8s/ingresses.py
+++ b/tcp_tests/managers/k8s/ingresses.py
@@ -14,6 +14,8 @@
 
 from kubernetes import client
 
+from devops.helpers import helpers
+
 from tcp_tests.managers.k8s.base import K8sBaseResource
 from tcp_tests.managers.k8s.base import K8sBaseManager
 
@@ -41,6 +43,12 @@
         self._manager.api.delete_namespaced_ingress(
             self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
 
+    def wait_ready(self, timeout=120, interval=2):
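+        # Block until the ingress status reports a load balancer address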
+        helpers.wait(
+            lambda: self.read().status.load_balancer.ingress is not None,
+            timeout=timeout, interval=interval)
+        return self
+
 
 class K8sIngressManager(K8sBaseManager):
     resource_class = K8sIngress
diff --git a/tcp_tests/managers/k8s/serviceaccounts.py b/tcp_tests/managers/k8s/serviceaccounts.py
index 3b779eb..bc0db30 100644
--- a/tcp_tests/managers/k8s/serviceaccounts.py
+++ b/tcp_tests/managers/k8s/serviceaccounts.py
@@ -14,6 +14,8 @@
 
 from kubernetes import client
 
+from devops.helpers import helpers
+
 from tcp_tests.managers.k8s.base import K8sBaseResource
 from tcp_tests.managers.k8s.base import K8sBaseManager
 
@@ -41,6 +43,13 @@
         self._manager.api.delete_namespaced_service_account(
             self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
 
+    def wait_secret_generation(self, timeout=90, interval=2):
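+        # Block until at least one secret is attached to the service account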
+        def is_secret_generated():
+            secrets = self.read().secrets
+            return secrets is not None and len(secrets) > 0
+        helpers.wait(lambda: is_secret_generated(),
+                     timeout=timeout, interval=interval)
+
 
 class K8sServiceAccountManager(K8sBaseManager):
     resource_class = K8sServiceAccount
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 4cd7435..cc0a924 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -192,7 +192,7 @@
                image=self.__config.k8s.k8s_conformance_image)
         return self.__underlay.check_call(
                cmd=cmd, node_name=node_name, timeout=timeout,
-               raise_on_err=raise_on_err)
+               raise_on_err=raise_on_err, verbose=True)
 
     def run_virtlet_conformance(self, timeout=60 * 120,
                                 log_file='virtlet_conformance.log'):
@@ -332,7 +332,7 @@
             LOG.debug('Installing xunitmerge')
             r.check_call(cmd, raise_on_err=False)
             LOG.debug('Merging xunit')
-            cmd = ("cd {0}; arg = ''; "
+            cmd = ("cd {0}; arg=''; "
                    "for i in $(ls | grep xml); "
                    "do arg=\"$arg $i\"; done && "
                    "xunitmerge $arg {1}".format(path, output))
@@ -364,15 +364,19 @@
         return self.controller_check_call("nslookup {0} {1}".format(host, src))
 
     @retry(300, exception=DevopsCalledProcessError)
-    def curl(self, url):
+    def curl(self, url, *args):
         """
         Run curl on controller and return stdout
 
         :param url: url to curl
-        :return: response string
+        :return: list of strings (with \n at the end of every line)
         """
-        result = self.controller_check_call("curl -s -S \"{}\"".format(url))
-        LOG.debug("curl \"{0}\" result: {1}".format(url, result['stdout']))
+        args = list(args)
+        args.append(url)
+        cmd = "curl -s -S {}".format(
+            " ".join(["'{}'".format(a.replace("'", "\\'")) for a in args]))
+        result = self.controller_check_call(cmd)
+        LOG.debug("{0}\nresult:\n{1}".format(cmd, result['stdout']))
         return result['stdout']
 
 
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 2c1ba2d..3bb0a1f 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -19,6 +19,7 @@
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 from tcp_tests.managers.clients.prometheus import prometheus_client
 from tcp_tests import logger
+from tcp_tests import settings
 
 LOG = logger.logger
 
@@ -97,17 +98,65 @@
                 service_stat_dict.update({tmp[0]: tmp[1]})
         return service_stat_dict
 
-    def run_sl_functional_tests(self, node_to_run, tests_path,
-                                test_to_run, skip_tests,
-                                reruns=5, reruns_delay=60):
+    def setup_sl_functional_tests(self, node_to_run,
+                                  repo_path='/root/stacklight-pytest',
+                                  sl_test_repo=settings.SL_TEST_REPO,
+                                  sl_test_commit=settings.SL_TEST_COMMIT):
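+        # Install stacklight-pytest into a virtualenv on the target node and generate its config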
         target_node_name = [node_name for node_name
                             in self.__underlay.node_names()
                             if node_to_run in node_name]
-        cmd = (". venv-stacklight-pytest/bin/activate;"
+        cmd_install = (
+            "set -ex;"
+            "apt-get install -y build-essential python-dev "
+            "    virtualenv;"
+            "[ -d venv-stacklight-pytest ] || "
+            "    virtualenv --system-site-packages venv-stacklight-pytest;"
+            ". venv-stacklight-pytest/bin/activate;"
+            "if [ ! -d {repo_path} ]; then"
+            "    git clone {sl_test_repo} {repo_path};"
+            "fi;"
+            "pushd {repo_path};"
+            "git checkout {sl_test_commit};"
+            "popd;"
+            "pip install {repo_path};"
+            .format(repo_path=repo_path,
+                    sl_test_repo=sl_test_repo,
+                    sl_test_commit=sl_test_commit)
+        )
+
+        cmd_configure = (
+            "set -ex;"
+            ". venv-stacklight-pytest/bin/activate;"
+            "stl-tests gen-config-mk;"
+            "cp venv-stacklight-pytest/lib/python2.7/site-packages/"
+            "stacklight_tests/fixtures/config.yaml "
+            "{repo_path}/stacklight_tests/fixtures/config.yaml;"
+            .format(repo_path=repo_path)
+        )
+
+        with self.__underlay.remote(node_name=target_node_name[0]) \
+                as node_remote:
+            LOG.info("Install stacklight-pytest on the node {0}".format(
+                target_node_name[0]))
+            node_remote.check_call(cmd_install, verbose=True)
+
+            LOG.info("Configure stacklight-pytest on the node {0}".format(
+                target_node_name[0]))
+            node_remote.check_call(cmd_configure, verbose=True)
+
+    def run_sl_functional_tests(self, node_to_run, tests_path,
+                                test_to_run, skip_tests,
+                                reruns=5, reruns_delay=60,
+                                junit_report_name='report.xml'):
+        target_node_name = [node_name for node_name
+                            in self.__underlay.node_names()
+                            if node_to_run in node_name]
+        cmd = ("set -ex;"
+               ". venv-stacklight-pytest/bin/activate;"
                "cd {tests_path}; "
                "export VOLUME_STATUS='available';"
-               "pytest {reruns} {reruns_delay} "
-               "-k {skip_tests} {test_to_run}".format(**{
+               "pytest {reruns} {reruns_delay} --junit-xml={junit_report_name}"
+               " -k {skip_tests} {test_to_run}".format(**{
                    "tests_path": tests_path,
                    "skip_tests": ("'not " + skip_tests + "'"
                                   if skip_tests else ''),
@@ -116,6 +165,7 @@
                               if reruns > 1 else ""),
                    "reruns_delay": ("--reruns-delay {}".format(reruns_delay)
                                     if reruns_delay > 0 else ""),
+                   "junit_report_name": junit_report_name,
                    }))
 
         with self.__underlay.remote(node_name=target_node_name[0]) \
@@ -131,7 +181,8 @@
         target_node_name = [node_name for node_name
                             in self.__underlay.node_names()
                             if node_to_run in node_name]
-        cmd = (". venv-stacklight-pytest/bin/activate;"
+        cmd = ("set -ex;"
+               ". venv-stacklight-pytest/bin/activate;"
                "cd {tests_path}; "
                "export VOLUME_STATUS='available';"
                "pip install pytest-json;"
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 8233a9b..83461ab 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,6 +1,6 @@
 # git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre]   # Use this requirement for PostgreSQL
 libvirt-python>=3.5.0,<4.1.0  # LGPLv2+
-git+git://github.com/openstack/fuel-devops.git@25d4cc67315132b1b27131977b2e07029b3ffbe1   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+git+git://github.com/openstack/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
 git+git://github.com/dis-xcom/fuel-devops-driver-ironic
 paramiko
 six
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 1f69850..0d79cc4 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -81,3 +81,7 @@
 TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
 TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
 SALT_VERSION = os.environ.get('SALT_VERSION', '2017.7')
+
+SL_TEST_REPO = os.environ.get('SL_TEST_REPO',
+                              'https://github.com/Mirantis/stacklight-pytest')
+SL_TEST_COMMIT = os.environ.get('SL_TEST_COMMIT', 'master')
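
These two settings provide the defaults consumed by setup_sl_functional_tests(); a short sketch of overriding them for a single run, assuming the same environment-variable mechanism used elsewhere in settings.py (the fork URL below is a placeholder):

    import os

    # Must be set before tcp_tests.settings is imported, since the module
    # reads the environment at import time.
    os.environ['SL_TEST_REPO'] = 'https://github.com/example/stacklight-pytest'
    os.environ['SL_TEST_COMMIT'] = 'proposed'

    from tcp_tests import settings
    print(settings.SL_TEST_REPO, settings.SL_TEST_COMMIT)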
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 037dbd8..d76396a 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -53,6 +53,9 @@
 _default_openstack_steps = pkg_resources.resource_filename(
     __name__, 'templates/{0}/openstack.yaml'.format(
         settings.LAB_CONFIG_NAME))
+_default_openstack_resources_steps = pkg_resources.resource_filename(
+    __name__, 'templates/{0}/post_openstack.yaml'.format(
+        settings.LAB_CONFIG_NAME))
 _default_opencontrail_prepare_tests_steps_path = \
     pkg_resources.resource_filename(
         __name__, 'templates/{0}/opencontrail.yaml'.format(
@@ -203,6 +206,9 @@
     ct.Cfg('openstack_steps_path', ct.String(),
            help="Path to YAML with steps to deploy openstack",
            default=_default_openstack_steps),
+    ct.Cfg('openstack_resources_steps_path', ct.String(),
+           help="Path to YAML with steps to deploy openstack",
+           default=_default_openstack_resources_steps),
     ct.Cfg('horizon_host', ct.IPAddress(),
            help="", default='0.0.0.0'),
     ct.Cfg('horizon_port', ct.String(),
@@ -348,6 +354,8 @@
            help="", default=False),
     ct.Cfg('kubernetes_metallb_enabled', ct.Boolean(),
            help="", default=False),
+    ct.Cfg('kubernetes_ingressnginx_enabled', ct.Boolean(),
+           help="", default=False),
     ct.Cfg('kubelet_fail_on_swap', ct.Boolean(),
            help="", default=False)
 ]
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
index e053de3..02cf300 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
@@ -43,7 +43,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -165,7 +165,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
index 56db406..67833da 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
@@ -6,7 +6,7 @@
 {% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
 {% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-maas') %}
@@ -24,8 +24,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
index 300039a..0b00542 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt-context-cookiecutter-contrail.yaml
@@ -43,7 +43,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -170,7 +170,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
index 4325907..3b508ce 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
@@ -6,7 +6,7 @@
 {% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
 {% from 'cookied-bm-contrail-nfv-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-nfv-maas') %}
@@ -24,8 +24,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
index a9e33af..8cb8de7 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail-nfv.yaml
@@ -91,7 +91,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -216,7 +216,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
index 1e06917..b130ae8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt-context-cookiecutter-contrail.yaml
@@ -92,7 +92,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -213,7 +213,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index 32afbfa..1ce4082 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
@@ -22,8 +22,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "auditd" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
index 1125a8f..4a9dc13 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail.yaml
@@ -82,7 +82,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -207,7 +207,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index 494333a..81c3307 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
@@ -22,8 +22,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "auditd" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 0d9e84c..fcc3f87 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -79,7 +79,7 @@
   control_vlan: '2416'
   cookiecutter_template_branch: proposed
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.62
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.0/26
@@ -190,7 +190,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.2
   shared_reclass_branch: proposed
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
index 8a6d342..f3c2f61 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -27,8 +27,7 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index b0e32ef..f8cc820 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -23,7 +23,7 @@
   control_vlan: '2404'
   cookiecutter_template_branch: proposed
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.164.1
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.164.0/26
@@ -154,7 +154,7 @@
   salt_master_address: 10.167.4.2
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.164.2
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   fluentd_enabled: 'False'
   stacklight_log_address: 10.167.4.60
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index eb7e213..81d9096 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
 
@@ -15,8 +15,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "runtest" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
index 954323c..6e0fee1 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -43,7 +43,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.126
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -163,7 +163,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
index f30331e..afec74c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail-nfv') %}
@@ -23,8 +23,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index e57820e..476df0d 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -43,7 +43,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.126
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -158,7 +158,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
index db949d4..02bcbf2 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml.back
@@ -12,7 +12,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.126
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -154,7 +154,7 @@
   salt_master_address: 10.167.8.66
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index a214666..1adfd90 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -4,7 +4,7 @@
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_TRUSTY_IMAGE_URL with context %}
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_XENIAL_IMAGE_URL with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
@@ -24,8 +24,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 653fc81..d735caf 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -79,7 +79,7 @@
   control_vlan: '2416'
   cookiecutter_template_branch: proposed
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.62
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.0/26
@@ -190,7 +190,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.2
   shared_reclass_branch: proposed
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index a4e43ba..ce9318e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -27,8 +27,7 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 88aef93..49e3ddb 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -82,7 +82,7 @@
   control_vlan: '2410'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.17.41.2
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -175,7 +175,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.17.41.3
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
index d26a22a..3705053 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
 {% from 'cookied-bm-mcp-pike-k8s-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
@@ -25,8 +25,7 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
index fa2ffb2..c8612bb 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
@@ -50,7 +50,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.126
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -187,7 +187,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_refspec: refs/changes/44/16144/1
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
index da9c583..e7584f8 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
@@ -50,7 +50,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.126
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -262,7 +262,7 @@
   sfdc_password: admin
   sfdc_sandbox_enabled: 'False'
   sfdc_username: admin
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
index 9ff0f8a..d39ca10 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -27,13 +27,12 @@
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "auditd" "logrotate"') }}
-
-- description: 'Workaround for typo in salt.minion.service (https://gerrit.mcp.mirantis.net/#/c/14806/)'
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+- description: 'Workaround for typo in salt.minion.service (https://gerrit.mcp.mirantis.com/#/c/14806/)'
   cmd: |
-    git clone https://gerrit.mcp.mirantis.net/salt-formulas/salt /tmp/salt-formula-salt;
+    git clone https://gerrit.mcp.mirantis.com/salt-formulas/salt /tmp/salt-formula-salt;
     pushd /tmp/salt-formula-salt;
-    git fetch https://gerrit.mcp.mirantis.net/salt-formulas/salt refs/changes/06/14806/1 && git checkout FETCH_HEAD;
+    git fetch https://gerrit.mcp.mirantis.com/salt-formulas/salt refs/changes/06/14806/1 && git checkout FETCH_HEAD;
     popd;
     cp /tmp/salt-formula-salt/salt/minion/service.sls /usr/share/salt-formulas/env/salt/minion/service.sls;
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index 898017e..bf26588 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -1,34 +1,34 @@
 default_context:
   auditd_enabled: 'False'
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAtyCfiXxwB6Dk6n7Y1t9u2XqMkLPvMArKwRUWGEwTzS7w0NzY
-    bCYdUfxo9m3tmhCO6hb0Yqzk6LEcOrARR7nHK7dS0JpRmZBeD3thdgXD8wNaG1PQ
-    ZzdNwGHP5cjfBXPHYXdP1k6HtLB1PymPDyEqhU0ZJrVGBK4+WSLNesSGOMiREQSx
-    kg/85aGdagLWHgAbgi0x1xx+Bu8LtuVkIcz8IMa3lanY7B8s5aIMxsOGTokJvdRL
-    QZJN0AfGRSANTIQZXfgkTO5wP85UsNisB8j7bliLl1wbxgnq/LTJZ+nQ1PA4dx8c
-    t0FKHYIR6zSd0LkDZaxJnZBgSrVnZ2JBbt7hTwIDAQABAoIBAHu/Ic+INTQSd142
-    hVT9+ywe8em+jX0LbeN32kxk7GSUucqJ0f2S6/FA/bS4p/yZ/9kT1eTwLGdJd2f5
-    HlQ3p+1UnjO0dDuvIMCZgUx9rOIEe9lHk+aLqpC8B/6g9IP8rtigBWUt/+oL687Y
-    yIFSyib16G8Nw9jZ3evh5rR1JLYtPHvAJiodsT3iY/+wZkuo4dAa4/QlKPT7QXaU
-    G5/AA/8zdsVOJl5JOHjP2pFBMzxttkWbkuYpEQe7LRw5MOlfFpMEYYZ+NJGVwDNe
-    0WTpiOIDo78xaq5TsOS23fJCEKodtFrITXvSv0c1tNoL/WcslwmwcV3mKyySFffG
-    Sj7G5RECgYEA7NszuBZBY0Zn7qLkczIdTq15lZ0KFJb2sHIRQbzeeCYn6Q8LJsCb
-    ELhgevun4BxrE2O+R8H4HL+g002vqzL9Vn9oOqFTn3GZMaHojiMSmjCBNl05Mftw
-    EM69l6WV6H8E+D90GMGGoxRJlqHdOuNcQ9bdQpkF4vTNBfzx7VU+5csCgYEAxe23
-    h191srNg7wjafMuK22RtM739knqX+sqeFaGqM6f73+vJaqNilvfgSRQYZc1MOetp
-    Ty4A4g/Jx/NkDWkaLbewFaHw7dNK62Vr6Ovl67Z9sEo8A2ySS7VWVuAqzVbRjyGp
-    yddGiW2Q+ITdfPfHbCFobVUgFeSinfZxkMFw4g0CgYBG8rZASzJU+W8CdXq24ukS
-    ezYzUbIGTt4gJlry9Q8ysEM+NYpilkkcrg4AaMd1gy2zxinmNr0KZ4BWKywWvRRT
-    x6BCB7cTyKRZ0KTnhqv40dSyoyQRy75a2oLCHRCVbw7fCarOC5I34UjVvTCWhipK
-    C9+FJm8z954+T/Fr5SANFwKBgEJBSvhD2jBRn5ckjY7My9SZD30Mkj9gTlOjU7vF
-    /CWCi+vvD+NkgfIrU6bi1S/uwx94UC4zJhSGWHNYZBuhHSREour65J2X5zJZJwA3
-    RyXaVsSWdPRoeahiMV6vd2R5NXkGOcHZEEGcrbSjNUlJ4DWwETbYEf+CI3VhM67T
-    MihZAoGBALxcTSivHJZDle81lsu1dcgmzZkUfQAcUSYDWhg+Bqg3A8FVKMpEzrbd
-    weGRM8S8oAz1PN0T/LRcpJq3TFZpy+iXx59jl5XenmoKwPr+u5XFrEHTWqNS2NcL
-    MwS8VTJhWYNVdrNIRWClRVUv87hZMha40JHiPK1KA4em1G+H29x3
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3IJ+JfHAHoOTqftjW327ZeoyQs+8wCsrBFRYYTBPNLvDQ3NhsJh1R/Gj2be2aEI7qFvRirOTosRw6sBFHuccrt1LQmlGZkF4Pe2F2BcPzA1obU9BnN03AYc/lyN8Fc8dhd0/WToe0sHU/KY8PISqFTRkmtUYErj5ZIs16xIY4yJERBLGSD/zloZ1qAtYeABuCLTHXHH4G7wu25WQhzPwgxreVqdjsHyzlogzGw4ZOiQm91EtBkk3QB8ZFIA1MhBld+CRM7nA/zlSw2KwHyPtuWIuXXBvGCer8tMln6dDU8Dh3Hxy3QUodghHrNJ3QuQNlrEmdkGBKtWdnYkFu3uFP
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   bmk_enabled: 'False'
   calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
   calico_enable_nat: 'True'
@@ -83,7 +83,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
@@ -156,7 +156,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
   shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.4.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
index 0a07a81..bfff297 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -85,7 +85,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     ctl02:
       reclass_storage_name: kubernetes_control_node02
@@ -96,7 +96,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     ctl03:
       reclass_storage_name: kubernetes_control_node03
@@ -107,7 +107,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     cmp001:
       reclass_storage_name: kubernetes_compute_node01
@@ -119,7 +119,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
           single_address: ${_param:kubernetes_compute_node01_address}
 
     cmp002:
@@ -132,9 +132,35 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
           single_address: ${_param:kubernetes_compute_node02_address}
 
+    cmp003:
+      reclass_storage_name: kubernetes_compute_node03
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl_calico
+          single_address: ${_param:kubernetes_compute_node03_address}
+
+    cmp004:
+      reclass_storage_name: kubernetes_compute_node04
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl_calico
+          single_address: ${_param:kubernetes_compute_node04_address}
+
     mon01:
       reclass_storage_name: stacklight_server_node01
       roles:
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
index 8a739fc..70bf54d 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index c249522..b51cd5e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -13,6 +13,8 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
+{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
+{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
 {% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01') %}
 {% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02') %}
 {% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03') %}
@@ -80,6 +82,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +101
             default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_CMP03 }}: +103
+            default_{{ HOSTNAME_CMP04 }}: +104
             default_{{ HOSTNAME_LOG }}: +60
             default_{{ HOSTNAME_LOG01 }}: +61
             default_{{ HOSTNAME_LOG02 }}: +62
@@ -402,7 +406,7 @@
             role: k8s_controller
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -431,7 +435,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -460,7 +464,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -489,7 +493,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -515,7 +519,59 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP04 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index 2f0e52e..6b63c60 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -1,34 +1,34 @@
 default_context:
   auditd_enabled: 'False'
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEpAIBAAKCAQEA3ufjR+Eh/CJp84JZPKosMNL7ydXidfe9qdAnQIGGOsS/TBnc
-    RyY+hy4Mg5Or//VBpY53frcrEEnm1CzEeIfGALiQtsMWOwEEiEHIDzbxN7xyYK1u
-    9fpcRHZy16VJx2gQOxbeAIyct9jrQeQuSbN0k7Tr+bfWLVPoGL4SZiBC+dxRjrmT
-    2pMIdqa8kAcd5cakuyENXT31ZI/ffVscl7TJBat7tUbgD+48GK4LqMBL/eC6v2bQ
-    ohmo6ZqWLh9uT+/l/rRdIiBhI+kmPpUDRLnjkd/gH4GQh/r/PPlxu11JNwPgY4Kx
-    IAk5hq9uPRn3pqBKPg+WHgWmQvpHqVVDDnf7XQIDAQABAoIBAQDCEtuL5bQVNlFR
-    NphDfVZkXA3lOVempkB37Ud/nkYkPNDhjVKAkAe44pr6pEQI4px5bIUVypyv3egf
-    q6qT1oLKdedpeImObeBoUf3BYXC7ulNLYTVO7OAQq6BpqPuHpk8bY1l+2O5KE48h
-    G25BtQE26TrbfPf5FyjpAfQ6/rPRniURu1ZMFK1Do18wf7lGxa6RN4jPbfGfEvlf
-    q6GWGtsOB2kLXnUDjuDeUrgS8HgxBSMH+lwxrkdX0Qb4VN+cBOp8TC30rHXdLAmn
-    mWUDQhaao+zZpZsAAxGbM2BAFUQAicd/OS6FJn6xkH0KN6+Rp1Iiy3Sa97wMsMti
-    aHAyVwkBAoGBAPf1gcRmKTHaUVb7XgS3acytBm7LM2GCQfgPDvQIp8rf8bmnAko6
-    MzxPdq2WXzWY75JiNxQSsmemcJyBRJm1sscp0txAnZS5SSycWlHy0zP5LJDtU7jW
-    Z7dXtRYzdDL0sH6KVQCOmfDmGowLs3eO0F7MyCbDIwdkIQ4LCs+TWcYxAoGBAOYi
-    ZUR7vXFbmXQQUEHxeft3sF6v8epFhnMuvwHgmHIzSCDDKoIMoLlqDOV8KynggyqQ
-    /YpvzqfCuP4aDpriU1glTZB0R9WdkKwk+GW13U9LfDw86u/XfGkMtT2QP6PmIQaI
-    1MJlX2b0rihnUy6zqRFH+mU4+9I66Gg1s8O9s4DtAoGAbiUol82pzvNj3neatA2l
-    eb4CdYTeNhpeo4pM4ipWHtCL2CRP6BkiWVATL9j0QiLFiQkH3mrPxSsyKtNhXcZQ
-    vBfgCubJGR+VWbO6i1yKZTPykA5cemcDe3YCgvIoU9pN7GgWikDOMSyF7l/kQN+3
-    v+THpDBahxX7ePl+u+aAooECgYBOoigJ+2HirtLDJqPVtGXit6XK5MF7M+BZ0Pow
-    8QYF12Ho1+bZYuk0EXlwnDm/aFhJHhuTxtpM1isRn+Onpnel4bEcD69P3TPGric1
-    0atZ4cgEaSg5ZV68Ijx3Wad1IDfenLhd5/duHWK4qX1xsq+tGPQEzDC3R6uLl/Xh
-    hxsjjQKBgQC53W+e4N6pOK8oCA2tlDw8Nu733FRrxNP4emdTzYyKJbNxBP2LI/ts
-    K/fgcD9aWeo0zt3Y/0UzzijqrWMCG2NdAlHwEShcXUt1525O4H64mH50MeylTGcj
-    t6ZFlhArriIXlejxuU9Jxe/HEKMh/1iBdlnD0rCOfhJaY/HO9dWtRw==
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDe5+NH4SH8Imnzglk8qiww0vvJ1eJ1972p0CdAgYY6xL9MGdxHJj6HLgyDk6v/9UGljnd+tysQSebULMR4h8YAuJC2wxY7AQSIQcgPNvE3vHJgrW71+lxEdnLXpUnHaBA7Ft4AjJy32OtB5C5Js3STtOv5t9YtU+gYvhJmIEL53FGOuZPakwh2pryQBx3lxqS7IQ1dPfVkj999WxyXtMkFq3u1RuAP7jwYrguowEv94Lq/ZtCiGajpmpYuH25P7+X+tF0iIGEj6SY+lQNEueOR3+AfgZCH+v88+XG7XUk3A+BjgrEgCTmGr249GfemoEo+D5YeBaZC+kepVUMOd/td
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   bmk_enabled: 'False'
   calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
   calico_enable_nat: 'True'
@@ -83,7 +83,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
@@ -152,7 +152,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
   shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
index 4c01f4f..8af2c0c 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -85,7 +85,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     ctl02:
       reclass_storage_name: kubernetes_control_node02
@@ -96,7 +96,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     ctl03:
       reclass_storage_name: kubernetes_control_node03
@@ -107,7 +107,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     cmp001:
       reclass_storage_name: kubernetes_compute_node01
@@ -119,7 +119,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
           single_address: ${_param:kubernetes_compute_node01_address}
 
     cmp002:
@@ -132,5 +132,31 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
           single_address: ${_param:kubernetes_compute_node02_address}
+
+    cmp003:
+      reclass_storage_name: kubernetes_compute_node03
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl_calico
+          single_address: ${_param:kubernetes_compute_node03_address}
+
+    cmp004:
+      reclass_storage_name: kubernetes_compute_node04
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl_calico
+          single_address: ${_param:kubernetes_compute_node04_address}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
index a7994a8..94b248f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-cicd-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-cicd-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index 6168b6e..4f18aa4 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -13,6 +13,8 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004.' + DOMAIN_NAME) %}
 {% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
@@ -80,6 +82,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +101
             default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_CMP03 }}: +103
+            default_{{ HOSTNAME_CMP04 }}: +104
             default_{{ HOSTNAME_LOG }}: +60
             default_{{ HOSTNAME_LOG01 }}: +61
             default_{{ HOSTNAME_LOG02 }}: +62
@@ -402,7 +406,7 @@
             role: k8s_controller
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -431,7 +435,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -460,7 +464,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -489,7 +493,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -515,7 +519,59 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP04 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index 48e91fd..55d1c5e 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -1,34 +1,34 @@
 default_context:
   auditd_enabled: 'False'
-  backup_private_key: |-
-    -----BEGIN RSA PRIVATE KEY-----
-    MIIEowIBAAKCAQEAtyCfiXxwB6Dk6n7Y1t9u2XqMkLPvMArKwRUWGEwTzS7w0NzY
-    bCYdUfxo9m3tmhCO6hb0Yqzk6LEcOrARR7nHK7dS0JpRmZBeD3thdgXD8wNaG1PQ
-    ZzdNwGHP5cjfBXPHYXdP1k6HtLB1PymPDyEqhU0ZJrVGBK4+WSLNesSGOMiREQSx
-    kg/85aGdagLWHgAbgi0x1xx+Bu8LtuVkIcz8IMa3lanY7B8s5aIMxsOGTokJvdRL
-    QZJN0AfGRSANTIQZXfgkTO5wP85UsNisB8j7bliLl1wbxgnq/LTJZ+nQ1PA4dx8c
-    t0FKHYIR6zSd0LkDZaxJnZBgSrVnZ2JBbt7hTwIDAQABAoIBAHu/Ic+INTQSd142
-    hVT9+ywe8em+jX0LbeN32kxk7GSUucqJ0f2S6/FA/bS4p/yZ/9kT1eTwLGdJd2f5
-    HlQ3p+1UnjO0dDuvIMCZgUx9rOIEe9lHk+aLqpC8B/6g9IP8rtigBWUt/+oL687Y
-    yIFSyib16G8Nw9jZ3evh5rR1JLYtPHvAJiodsT3iY/+wZkuo4dAa4/QlKPT7QXaU
-    G5/AA/8zdsVOJl5JOHjP2pFBMzxttkWbkuYpEQe7LRw5MOlfFpMEYYZ+NJGVwDNe
-    0WTpiOIDo78xaq5TsOS23fJCEKodtFrITXvSv0c1tNoL/WcslwmwcV3mKyySFffG
-    Sj7G5RECgYEA7NszuBZBY0Zn7qLkczIdTq15lZ0KFJb2sHIRQbzeeCYn6Q8LJsCb
-    ELhgevun4BxrE2O+R8H4HL+g002vqzL9Vn9oOqFTn3GZMaHojiMSmjCBNl05Mftw
-    EM69l6WV6H8E+D90GMGGoxRJlqHdOuNcQ9bdQpkF4vTNBfzx7VU+5csCgYEAxe23
-    h191srNg7wjafMuK22RtM739knqX+sqeFaGqM6f73+vJaqNilvfgSRQYZc1MOetp
-    Ty4A4g/Jx/NkDWkaLbewFaHw7dNK62Vr6Ovl67Z9sEo8A2ySS7VWVuAqzVbRjyGp
-    yddGiW2Q+ITdfPfHbCFobVUgFeSinfZxkMFw4g0CgYBG8rZASzJU+W8CdXq24ukS
-    ezYzUbIGTt4gJlry9Q8ysEM+NYpilkkcrg4AaMd1gy2zxinmNr0KZ4BWKywWvRRT
-    x6BCB7cTyKRZ0KTnhqv40dSyoyQRy75a2oLCHRCVbw7fCarOC5I34UjVvTCWhipK
-    C9+FJm8z954+T/Fr5SANFwKBgEJBSvhD2jBRn5ckjY7My9SZD30Mkj9gTlOjU7vF
-    /CWCi+vvD+NkgfIrU6bi1S/uwx94UC4zJhSGWHNYZBuhHSREour65J2X5zJZJwA3
-    RyXaVsSWdPRoeahiMV6vd2R5NXkGOcHZEEGcrbSjNUlJ4DWwETbYEf+CI3VhM67T
-    MihZAoGBALxcTSivHJZDle81lsu1dcgmzZkUfQAcUSYDWhg+Bqg3A8FVKMpEzrbd
-    weGRM8S8oAz1PN0T/LRcpJq3TFZpy+iXx59jl5XenmoKwPr+u5XFrEHTWqNS2NcL
-    MwS8VTJhWYNVdrNIRWClRVUv87hZMha40JHiPK1KA4em1G+H29x3
-    -----END RSA PRIVATE KEY-----
-  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3IJ+JfHAHoOTqftjW327ZeoyQs+8wCsrBFRYYTBPNLvDQ3NhsJh1R/Gj2be2aEI7qFvRirOTosRw6sBFHuccrt1LQmlGZkF4Pe2F2BcPzA1obU9BnN03AYc/lyN8Fc8dhd0/WToe0sHU/KY8PISqFTRkmtUYErj5ZIs16xIY4yJERBLGSD/zloZ1qAtYeABuCLTHXHH4G7wu25WQhzPwgxreVqdjsHyzlogzGw4ZOiQm91EtBkk3QB8ZFIA1MhBld+CRM7nA/zlSw2KwHyPtuWIuXXBvGCer8tMln6dDU8Dh3Hxy3QUodghHrNJ3QuQNlrEmdkGBKtWdnYkFu3uFP
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   bmk_enabled: 'False'
   calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
   calico_enable_nat: 'True'
@@ -83,7 +83,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
@@ -151,7 +151,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
   shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
index 4c01f4f..8af2c0c 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/environment-context-k8s-genie.yaml
@@ -85,7 +85,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     ctl02:
       reclass_storage_name: kubernetes_control_node02
@@ -96,7 +96,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     ctl03:
       reclass_storage_name: kubernetes_control_node03
@@ -107,7 +107,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
 
     cmp001:
       reclass_storage_name: kubernetes_compute_node01
@@ -119,7 +119,7 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
           single_address: ${_param:kubernetes_compute_node01_address}
 
     cmp002:
@@ -132,5 +132,31 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
+          role: single_ctl_calico
           single_address: ${_param:kubernetes_compute_node02_address}
+
+    cmp003:
+      reclass_storage_name: kubernetes_compute_node03
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl_calico
+          single_address: ${_param:kubernetes_compute_node03_address}
+
+    cmp004:
+      reclass_storage_name: kubernetes_compute_node04
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl_calico
+          single_address: ${_param:kubernetes_compute_node04_address}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
index 1e3c924..625472c 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-cicd-k8s-genie/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-cicd-k8s-genie/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
index 8c1b151..331dfb4 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
@@ -13,6 +13,8 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03') %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001') %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002') %}
+{% set HOSTNAME_CMP03 = os_env('HOSTNAME_CMP03', 'cmp003') %}
+{% set HOSTNAME_CMP04 = os_env('HOSTNAME_CMP04', 'cmp004') %}
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01') %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02') %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03') %}
@@ -70,6 +72,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +101
             default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_CMP03 }}: +103
+            default_{{ HOSTNAME_CMP04 }}: +104
             default_{{ HOSTNAME_KVM }}: +240
             default_{{ HOSTNAME_KVM01 }}: +241
             default_{{ HOSTNAME_KVM02 }}: +242
@@ -379,7 +383,7 @@
             role: k8s_controller
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -408,7 +412,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -437,7 +441,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -466,7 +470,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -492,7 +496,59 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP04 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
index a5992bf..976506f 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -51,7 +51,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
@@ -190,7 +190,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
   shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
index a2d8eb5..4cb437b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-cicd-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-cicd-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
index 375d734..71a02fa 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -51,7 +51,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
@@ -192,7 +192,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
   shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.4.60
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
index 1791477..5ed6d36 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
@@ -4,6 +4,8 @@
       roles:
       - infra_config
       - linux_system_codename_xenial
+      classes:
+      - environment.cookied-cicd-pike-dvr-sl.override_ntp_virtual
       interfaces:
         ens3:
           role: single_dhcp
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
index e9d9408..a38f2f3 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-cicd-pike-dvr-sl/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index d1e447c..4798f2b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -51,7 +51,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
@@ -192,7 +192,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
   shared_reclass_branch: 'proposed'
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.4.60
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
index 3c4d021..4905e32 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-cicd-pike-ovs-sl/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
index 0ade6cf..51a7ede 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
index 04ec5b2..b965d0f 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-mitaka-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-dvr/overrides.yml') %}
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
index 10f3c17..a22e3a3 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.60
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
index 46fd677..3f3def3 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-mitaka-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-mitaka-ovs/overrides.yml') %}
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
index b27d4f4..3eb6821 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
index d65d59e..d869571 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-newton-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-newton-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-cookied-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-cookied-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-dvr/overrides.yml') %}
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
index 09ed630..7f2b815 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
index c95be5c..496da5b 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-newton-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-newton-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/cookied-mcp-newton-ovs/overrides.yml') %}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index aa9b016..f5b4f73 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-ocata-dop-sl2/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
index 095c58a..829d515 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index e17ac5b..14983b6 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -144,7 +144,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
   shared_reclass_branch: master
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
index ddece28..69fbc1a 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
@@ -90,7 +90,7 @@
 
 - description: Create net04
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
+    '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
index 6a24020..140eb8c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 7367667..d806de4 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -1,8 +1,8 @@
 default_context:
   auditd_enabled: 'False'
   backend_network_netmask: 255.255.255.0
-  backend_network_subnet: 10.167.7.0/24
-  backend_vlan: '30'
+  backend_network_subnet: 10.167.4.0/24
+  backend_vlan: '10'
   backup_private_key: |-
     -----BEGIN RSA PRIVATE KEY-----
     MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
@@ -33,7 +33,7 @@
     -----END RSA PRIVATE KEY-----
   backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
   bmk_enabled: 'False'
-  ceph_cluster_network: 10.167.7.0/24
+  ceph_cluster_network: 10.167.4.0/24
   ceph_enabled: 'True'
   ceph_hyper_converged: 'False'
   ceph_mon_node01_address: 10.167.4.66
@@ -43,15 +43,16 @@
   ceph_mon_node03_address: 10.167.4.68
   ceph_mon_node03_hostname: cmn03
   ceph_osd_backend: bluestore
-  ceph_osd_block_db_size: '20'
+  ceph_osd_block_db_size: '10'
   ceph_osd_bond_mode: active-backup
   ceph_osd_count: '2'
-  ceph_osd_data_disks: /dev/vdb,/dev/vdc
-  ceph_osd_journal_or_block_db_disks: /dev/vdd,/dev/vde
+  ceph_osd_data_disks: /dev/vdb
+  ceph_osd_journal_or_block_db_disks: /dev/vdc
   ceph_osd_node_count: '2'
+  ceph_osd_journal_size: '10'
   ceph_osd_primary_first_nic: eth1
   ceph_osd_primary_second_nic: eth2
-  ceph_osd_rack01_backend_subnet: 10.167.7
+  ceph_osd_rack01_backend_subnet: 10.167.4
   ceph_osd_rack01_hostname: osd
   ceph_osd_rack01_single_subnet: 10.167.4
   ceph_public_network: 10.167.4.0/24
@@ -77,7 +78,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: 'proposed'
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 10.167.5.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 10.167.5.0/24
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
index 6d343ba..9cc4dea 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
@@ -7,175 +7,33 @@
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
 
-# Install OpenStack control services
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
 
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
 
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
 
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
 
-- description: Check apache status to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
 
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
 
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
 
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
 
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 10}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
 
 - description: Create net04_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
@@ -233,6 +91,4 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
index 0ff2a9b..e90b99b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "glusterfs" "jenkins" "maas" "backupninja" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
index d3bacce..42df6a5 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
@@ -17,16 +17,16 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD1 = os_env('HOSTNAME_OSD1', 'osd1.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD2 = os_env('HOSTNAME_OSD2', 'osd2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
@@ -47,8 +47,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -72,8 +72,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -97,8 +97,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -122,8 +122,8 @@
             default_{{ HOSTNAME_CTL03 }}: +13
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
             default_{{ HOSTNAME_CMN01 }}: +96
             default_{{ HOSTNAME_CMN02 }}: +97
             default_{{ HOSTNAME_CMN03 }}: +98
@@ -133,25 +133,7 @@
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
-            dhcp: [+10, -10]
-
-      storage-pool01:
-        net: {{ os_env('STORAGE_ADDRESS_POOL01', '10.100.0.0/16:24') }}
-        params:
-          ip_reserved:
-            gateway: +1
-            l2_network_device: +1
-            default_{{ HOSTNAME_OSD1 }}: +94
-            default_{{ HOSTNAME_OSD2 }}: +95
-            default_{{ HOSTNAME_CMN01 }}: +96
-            default_{{ HOSTNAME_CMN02 }}: +97
-            default_{{ HOSTNAME_CMN03 }}: +98
-            default_{{ HOSTNAME_RGW01 }}: +76
-            default_{{ HOSTNAME_RGW02 }}: +77
-            default_{{ HOSTNAME_RGW03 }}: +78
-          ip_ranges:
-            dhcp: [+10, -10]
-
+            dhcp: [+130, +230]
 
     groups:
       - name: default
@@ -159,7 +141,6 @@
           name: devops.driver.libvirt
           params:
             connection_string: !os_env CONNECTION_STRING, qemu:///system
-            storage_pool_name: !os_env STORAGE_POOL_NAME, default
             stp: False
             hpet: False
             enable_acpi: true
@@ -171,7 +152,6 @@
           private: private-pool01
           tenant: tenant-pool01
           external: external-pool01
-          storage: storage-pool01
 
         l2_network_devices:
           private:
@@ -196,13 +176,6 @@
             forward:
               mode: route
 
-          storage:
-            address_pool: storage-pool01
-            dhcp: true
-            forward:
-              mode: route
-
-
         group_volumes:
          - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
            source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
@@ -430,7 +403,7 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_OSD1 }}
+          - name: {{ HOSTNAME_OSD01 }}
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
@@ -462,7 +435,7 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_OSD2 }}
+          - name: {{ HOSTNAME_OSD02 }}
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
index fd768d0..74a1465 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
@@ -104,8 +104,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     cmn01.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_mon_node01
@@ -117,8 +115,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     cmn02.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_mon_node02
@@ -130,8 +126,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     cmn03.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_mon_node03
@@ -143,8 +137,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     rgw01.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_rgw_node01
@@ -156,8 +148,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     rgw02.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_rgw_node02
@@ -169,8 +159,6 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
-          role: single_storage_dhcp
 
     rgw03.cookied-mcp-pike-dvr-ceph.local:
       reclass_storage_name: ceph_rgw_node03
@@ -181,6 +169,4 @@
         ens3:
           role: single_dhcp
         ens4:
-          role: single_ctl
-        ens5:
-          role: single_storage_dhcp
\ No newline at end of file
+          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index bac6199..4a29768 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
   stacklight_log_hostname: mon
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
index c8215a2..08657a2 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index a4c8abf..324263b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   fluentd_enabled: 'True'
   stacklight_log_address: 172.16.10.60
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
index ba3d5d6..c5a459f 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "glusterfs" "jenkins" "maas" "backupninja" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index cd29897..941b78f 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.60
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
index 4a089ac..c657839 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
new file mode 100644
index 0000000..c4c7371
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -0,0 +1,232 @@
+default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  auditd_enabled: 'True'
+  backend_network_netmask: 255.255.255.0
+  backend_network_subnet: 10.167.4.0/24
+  backend_vlan: '10'
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpQIBAAKCAQEAuY7v++mza4e75f80GYE2iIdZ30d7yvT6Xym00iD/OxRWNtXe
+    rIh7M0X30Q0F2D3hVvPz57axTheOK3xFRVvPoIZjm3fVgwNzQmTyfAZz4TOdTtWx
+    9cye8Bo20qlRpq8wFQMSDhgRv0J1iX6LjJsr8pM1A8q3e4GYnv0DrLBZ1Iq7+T/k
+    qzzsT7PuvGvEK63J/DaE6BI73QG+0W0MvblddznwXvXLo/VlBXajWOv37YHiMFMT
+    Zap7lTvGVEyxByVEM04Bo7ABF2PEPwGrGL9nOpJ1LSxBCcryNVyZbveFF/e8A1Cj
+    178rD+W4H5p2Agr5A/y3LZpTkyhnTtWXzwT3YwIDAQABAoIBACiUNa8lgHM3j8PQ
+    d5hMRZy93M2TWGMeB9Lf0AdT5/0HiYMcakHY5vhjiLpS2sBbZ/gYCXLW5Rdq11Bz
+    MMLmPRWhzg6lui+YhZAze0PcNWM+YlxnJy/Vu7xOP0b6eDy3exBdR4mFgfwNkJ6s
+    6d+p34aA4ssdfdqokLPUKQWO21Y7UVYbht6Tv55nd3YMGXHxJ0phitf7/dFsEX9Z
+    sPSdWqkYMP2UWQBrFSjxV9Q+kE8OQ1VYDFCRa/9a5QHMrFo/0dOxLkZosTcCHM8A
+    H2RHPcKrxFWn7A3eAiA4VCvtM8RX239Bi7Gdvfl1HflSkQwBDUV8F2RZLHM2NU2T
+    EGBQcuECgYEA4ZBwZAtJIQ0R35prGLFj+drb/IKr+x2WD9WOZ83cheGSwdCRk/he
+    zZ5cCKgmSqg9cDJ4/vraoqmAlwQ4uj4e1TudgHPwdDUPuwoveIbUfUyzdIZMt0s4
+    fe61AUhEniIOi09H+E2yHz6OWSw3uA4SKkNsMT4RZc4Nag3Fo86Rrj8CgYEA0piY
+    HMYPHposfjVNM0PMU9F1lwQJMdx3a55JYgUc8cMvrsZPzvwJqrGCMNjP4lPwl/AS
+    x73yaxcxEYGiG6wDkvx+hujjyAx+sal62EB9ofJGDI7u8L2/0voW53RWvTUBsy8e
+    +xOQTewCAAYGLIJnGfEyVqEAu9IPwz3pep8xtd0CgYEAruTusDOr9SuMI0M5LQFG
+    UpHnJogvT1smYoqki0osZcZ8ozjT19aps2bJV5EBd7uxP5BzDsl0wtEIuo90aLwH
+    7i/2NIYw9/m4g78nBZ4NnkXdk0karLhvSf3PbPoa8j3X5x6G4DlmFiHL/8pwPY7z
+    eL+kYR4OIVC+R+/7wcJGZMMCgYEAqOLg0epvw53mYoxCTgNoACvw/lupOAhS6MY2
+    mVn6XVOnkKTO6fIrmmziOGQXSq0APAi2NuL4XrNpkV2BcGmhMCY3Hd/0k8CZdcax
+    km0dk1skm/ugWQYCqKIQ7irZSMESjO0UDkwhJKxI6lXqa5VkM2S/dsOFQBp0s6GZ
+    9NFn3y0CgYEAogzKchxouu4BgqHn76W0IB/XeTuiCDSGRv+IwMoghxbPoT6lO920
+    OHWoo+bX3VuxpCFkN2fFH6V8WncUrv4ItAgxGftL8h9BhMRKiatwOBAw0vG/CO2G
+    CIyvmjhIvpIdAl8i1jIJw1sn/ZVYm8+ZKy4VAqPevc3Ze7WGoMUkFyg=
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5ju/76bNrh7vl/zQZgTaIh1nfR3vK9PpfKbTSIP87FFY21d6siHszRffRDQXYPeFW8/PntrFOF44rfEVFW8+ghmObd9WDA3NCZPJ8BnPhM51O1bH1zJ7wGjbSqVGmrzAVAxIOGBG/QnWJfouMmyvykzUDyrd7gZie/QOssFnUirv5P+SrPOxPs+68a8Qrrcn8NoToEjvdAb7RbQy9uV13OfBe9cuj9WUFdqNY6/ftgeIwUxNlqnuVO8ZUTLEHJUQzTgGjsAEXY8Q/AasYv2c6knUtLEEJyvI1XJlu94UX97wDUKPXvysP5bgfmnYCCvkD/LctmlOTKGdO1ZfPBPdj
+  bmk_enabled: 'False'
+  ceph_cluster_network: 10.167.4.0/24
+  ceph_enabled: 'True'
+  ceph_hyper_converged: 'False'
+  ceph_mon_node01_address: 10.167.4.66
+  ceph_mon_node01_hostname: cmn01
+  ceph_mon_node02_address: 10.167.4.67
+  ceph_mon_node02_hostname: cmn02
+  ceph_mon_node03_address: 10.167.4.68
+  ceph_mon_node03_hostname: cmn03
+  ceph_osd_backend: bluestore
+  ceph_osd_block_db_size: '10'
+  ceph_osd_bond_mode: active-backup
+  ceph_osd_count: '2'
+  ceph_osd_data_disks: /dev/vdb
+  ceph_osd_journal_or_block_db_disks: /dev/vdc
+  ceph_osd_node_count: '2'
+  ceph_osd_journal_size: '10'
+  ceph_osd_primary_first_nic: eth1
+  ceph_osd_primary_second_nic: eth2
+  ceph_osd_rack01_backend_subnet: 10.167.4
+  ceph_osd_rack01_hostname: osd
+  ceph_osd_rack01_single_subnet: 10.167.4
+  ceph_osd_single_address_ranges: 10.167.4.94-10.167.4.95
+  ceph_osd_deploy_address_ranges: 10.167.5.94-10.167.5.95
+  ceph_osd_backend_address_ranges: 10.167.4.94-10.167.4.95
+  ceph_public_network: 10.167.4.0/24
+  ceph_rgw_address: 10.167.4.75
+  ceph_rgw_hostname: rgw
+  ceph_rgw_node01_address: 10.167.4.76
+  ceph_rgw_node01_hostname: rgw01
+  ceph_rgw_node02_address: 10.167.4.77
+  ceph_rgw_node02_hostname: rgw02
+  ceph_rgw_node03_address: 10.167.4.78
+  ceph_rgw_node03_hostname: rgw03
+  ceph_version: luminous
+  cicd_enabled: 'False'
+  cluster_domain: cookied-mcp-queens-dvr-ceph.local
+  cluster_name: cookied-mcp-queens-dvr-ceph
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: tekHhhWzn3YrxKbXGMvtWYj1usHGrRBYd2gfFwWNCnRentwCu1QKANHvpIeZCRvz
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: 'proposed'
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: obutenko@mirantis.com
+  gainsight_service_enabled: 'False'
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.11
+  infra_kvm01_deploy_address: 10.167.5.11
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.12
+  infra_kvm02_deploy_address: 10.167.5.12
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.13
+  infra_kvm03_deploy_address: 10.167.5.13
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.10
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kubernetes_ctl_on_kvm: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_network_name: deploy_network
+  maas_deploy_range_end: 10.167.5.230
+  maas_deploy_range_start: 10.167.5.20
+  maas_deploy_vlan: '0'
+  maas_enabled: 'False'
+  maas_fabric_name: deploy_fabric
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: 'proposed'
+  mcp_version: proposed
+  no_platform: 'False'
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.167.4.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.150
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.150
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.150
+  openstack_compute_backend_address_ranges: 10.167.6.105-10.167.6.150
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 10.167.4.111
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 10.167.4.112
+  openstack_dns_node02_hostname: dns02
+  openstack_control_address: 10.167.4.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.4.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.4.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.4.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.10
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.167.4.11
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.167.4.12
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.167.4.13
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.4.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.167.6.6
+  openstack_gateway_node02_address: 10.167.4.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.167.6.7
+  openstack_gateway_node03_address: 10.167.4.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.167.6.8
+  openstack_message_queue_address: 10.167.4.10
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.167.4.11
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.167.4.12
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.167.4.13
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.4.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.4.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.4.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.4.19
+  openstack_version: queens
+  osd_padding_with_zeros: 'False'
+  oss_enabled: 'False'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: BX7ium4MaRPIWBdyhj4LTbiedwg3yLep
+  salt_api_password_hash: $6$qYqzkiRP$MiqA5ZMfsmdXJcuTTyeCgNPv9CBGO5nSH4HwRKPGUh0MFXcEa8JDCUEtS8xLHCkol7CMdq.l6CG7of0iaUJ.u.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: 'proposed'
+  shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+  sriov_network_subnet: 10.55.0.0/16
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  version: proposed
+  vnf_onboarding_enabled: 'False'
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: False
+  designate_backend: bind
+  designate_enabled: 'False'
+  nova_vnc_tls_enabled: 'False'
+  galera_ssl_enabled: 'False'
+  openstack_mysql_x509_enabled: 'False'
+  rabbitmq_ssl_enabled: 'False'
+  openstack_rabbitmq_x509_enabled: 'False'
+  tenant_telemetry_enabled: 'False'
+  gnocchi_aggregation_storage: file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
new file mode 100644
index 0000000..fab7c18
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/core.yaml
@@ -0,0 +1,17 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
new file mode 100644
index 0000000..5d19c16
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -0,0 +1,36 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
+
+{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
+
+{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..dd8fd17
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/salt.yaml
@@ -0,0 +1,30 @@
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
new file mode 100644
index 0000000..979424f
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml
@@ -0,0 +1,49 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
new file mode 100644
index 0000000..39b6c0a
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
@@ -0,0 +1,674 @@
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr-ceph') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+70, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+70, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_OSD01 }}: +94
+            default_{{ HOSTNAME_OSD02 }}: +95
+            default_{{ HOSTNAME_CMN01 }}: +96
+            default_{{ HOSTNAME_CMN02 }}: +97
+            default_{{ HOSTNAME_CMN03 }}: +98
+            default_{{ HOSTNAME_RGW01 }}: +76
+            default_{{ HOSTNAME_RGW02 }}: +77
+            default_{{ HOSTNAME_RGW03 }}: +78
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+130, +230]
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMN03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_OSD01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: ceph
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_OSD02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: ceph
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_RGW02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+          - name: {{ HOSTNAME_RGW03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+
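+              # The full four-NIC layout (admin, private, tenant, external) is defined
+              # once here; cmp02 and gtw01 reuse it via the *all_interfaces and
+              # *all_network_config aliases.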
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
new file mode 100644
index 0000000..f279b44
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/vcp-context-environment.yaml
@@ -0,0 +1,172 @@
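+# Environment context for the cookied-mcp-queens-dvr-ceph lab: maps every node
+# to its reclass storage name, roles and per-interface network roles.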
+nodes:
+    cfg01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    osd<<count>>.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_osd_rack01
+      roles:
+      - ceph_osd
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_mon_node01
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_mon_node02
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmn03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_mon_node03
+      roles:
+      - ceph_mon
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw01.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_rgw_node01
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw02.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_rgw_node02
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    rgw03.cookied-mcp-queens-dvr-ceph.local:
+      reclass_storage_name: ceph_rgw_node03
+      roles:
+      - ceph_rgw
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
index f43d3f7..2dc0a2c 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
@@ -1,4 +1,7 @@
 default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  auditd_enabled: 'True'
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
@@ -13,7 +16,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -41,6 +44,7 @@
   local_repositories: 'False'
   maas_deploy_address: 192.168.10.90
   maas_hostname: cfg01
+  maas_enabled: 'False'
   mcp_version: stable
   offline_deployment: 'False'
   opencontrail_enabled: 'False'
@@ -51,6 +55,10 @@
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.150
+  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.150
+  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.150
+  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.150
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -100,6 +108,11 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -145,9 +158,9 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
-  stacklight_enabled: 'True'
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
+  stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.60
   stacklight_log_hostname: log
   stacklight_log_node01_address: 172.16.10.61
@@ -173,7 +186,7 @@
   stacklight_telemetry_node03_address: 172.16.10.88
   stacklight_telemetry_node03_hostname: mtr03
   stacklight_version: '2'
-  stacklight_long_term_storage_type: prometheus
+  stacklight_long_term_storage_type: influxdb
   static_ips_on_deploy_network_enabled: 'False'
   tenant_network_gateway: 10.1.0.1
   tenant_network_netmask: 255.255.255.0
@@ -181,3 +194,18 @@
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
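+  # Feature toggles for this lab: Designate DNSaaS with the BIND backend and
+  # Manila file shares using the LVM driver backed by /dev/loop1.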
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: False
+  designate_backend: bind
+  designate_enabled: 'True'
+  nova_vnc_tls_enabled: 'False'
+  galera_ssl_enabled: 'False'
+  openstack_mysql_x509_enabled: 'False'
+  rabbitmq_ssl_enabled: 'False'
+  openstack_rabbitmq_x509_enabled: 'False'
+  tenant_telemetry_enabled: 'False'
+  gnocchi_aggregation_storage: file
+  manila_enabled: 'True'
+  manila_share_backend: 'lvm'
+  manila_lvm_volume_name: 'manila-volume'
+  manila_lvm_devices: '/dev/loop1'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
index 0f806cf..0deff14 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.mcp11-ovs-dpdk.local:
+    cfg01.mcp-queens-dvr.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,16 +10,13 @@
         ens4:
           role: single_ctl
 
-    ctl01.mcp11-ovs-dpdk.local:
+    ctl01.mcp-queens-dvr.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
-#      - features_designate_pool_manager_database
-#      - features_designate_pool_manager
-#      - features_designate_pool_manager_keystone
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -27,15 +24,13 @@
         ens4:
           role: single_ctl
 
-    ctl02.mcp11-ovs-dpdk.local:
+    ctl02.mcp-queens-dvr.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-#      - features_designate_pool_manager_database
-#      - features_designate_pool_manager
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -43,15 +38,13 @@
         ens4:
           role: single_ctl
 
-    ctl03.mcp11-ovs-dpdk.local:
+    ctl03.mcp-queens-dvr.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-#      - features_designate_pool_manager_database
-#      - features_designate_pool_manager
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -59,11 +52,10 @@
         ens4:
           role: single_ctl
 
-    prx01.mcp11-ovs-dpdk.local:
+    prx01.mcp-queens-dvr.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-#      - features_designate_pool_manager_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -71,7 +63,7 @@
         ens4:
           role: single_ctl
 
-    mon01.mcp11-ovs-dpdk.local:
+    mon01.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_server_node01
       roles:
       - stacklightv2_server_leader
@@ -82,7 +74,7 @@
         ens4:
           role: single_ctl
 
-    mon02.mcp11-ovs-dpdk.local:
+    mon02.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_server_node02
       roles:
       - stacklightv2_server
@@ -93,7 +85,7 @@
         ens4:
           role: single_ctl
 
-    mon03.mcp11-ovs-dpdk.local:
+    mon03.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_server_node03
       roles:
       - stacklightv2_server
@@ -104,7 +96,7 @@
         ens4:
           role: single_ctl
 
-    log01.mcp11-ovs-dpdk.local:
+    log01.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_log_node01
       roles:
       - stacklight_log_leader_v2
@@ -115,7 +107,7 @@
         ens4:
           role: single_ctl
 
-    log02.mcp11-ovs-dpdk.local:
+    log02.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_log_node02
       roles:
       - stacklight_log
@@ -126,7 +118,7 @@
         ens4:
           role: single_ctl
 
-    log03.mcp11-ovs-dpdk.local:
+    log03.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_log_node03
       roles:
       - stacklight_log
@@ -137,7 +129,7 @@
         ens4:
           role: single_ctl
 
-    mtr01.mcp11-ovs-dpdk.local:
+    mtr01.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_telemetry_node01
       roles:
       - stacklight_telemetry_leader
@@ -148,7 +140,7 @@
         ens4:
           role: single_ctl
 
-    mtr02.mcp11-ovs-dpdk.local:
+    mtr02.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_telemetry_node02
       roles:
       - stacklight_telemetry
@@ -159,7 +151,7 @@
         ens4:
           role: single_ctl
 
-    mtr03.mcp11-ovs-dpdk.local:
+    mtr03.mcp-queens-dvr.local:
       reclass_storage_name: stacklight_telemetry_node03
       roles:
       - stacklight_telemetry
@@ -171,7 +163,7 @@
           role: single_ctl
 
     # Generator-based computes. For compatibility only
-    cmp<<count>>.mcp11-ovs-dpdk.local:
+    cmp<<count>>.mcp-queens-dvr.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
@@ -186,7 +178,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.mcp11-ovs-dpdk.local:
+    gtw01.mcp-queens-dvr.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
@@ -201,36 +193,13 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-#    dns01.mcp11-ovs-dpdk.local:
-#      reclass_storage_name: openstack_dns_node01
-#      roles:
-#      - features_designate_pool_manager_dns
-#      - linux_system_codename_xenial
-#      classes:
-#      - system.linux.system.repo.mcp.extra
-#      - system.linux.system.repo.mcp.apt_mirantis.openstack
-#      - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-#      - system.linux.system.repo.mcp.apt_mirantis.saltstack
-#      interfaces:
-#        ens3:
-#          role: single_dhcp
-#        ens4:
-#          role: single_ctl
-#          single_address: ${_param:openstack_dns_node01_address}
-#
-#    dns02.mcp11-ovs-dpdk.local:
-#      reclass_storage_name: openstack_dns_node02
-#      roles:
-#      - features_designate_pool_manager_dns
-#      - linux_system_codename_xenial
-#      classes:
-#      - system.linux.system.repo.mcp.extra
-#      - system.linux.system.repo.mcp.apt_mirantis.openstack
-#      - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-#      - system.linux.system.repo.mcp.apt_mirantis.saltstack
-#      interfaces:
-#        ens3:
-#          role: single_dhcp
-#        ens4:
-#          role: single_ctl
-#          single_address: ${_param:openstack_dns_node02_address}
+    share01.mcp-queens-dvr.local:
+      reclass_storage_name: openstack_share_node01
+      roles:
+      - openstack_share
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
index 92b21a5..bb1185e 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
@@ -1,119 +1,17 @@
 {% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
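+# Core infrastructure (keepalived, glusterfs, rabbitmq, galera, haproxy,
+# memcached) is installed through the shared-core.yaml macros below.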
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
index 14cf12e..70cc4f5 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
@@ -15,143 +15,22 @@
 
 # Install OpenStack control services
 
-{%- if OVERRIDE_POLICY != '' %}
-- description: Upload policy override
-  upload:
-    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
-    local_filename: overrides-policy.yml
-    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
-  node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Create custom cluster control class
-  cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
-  node_name: {{ HOSTNAME_CFG01 }}
-
-- description: Rename control classes
-  cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
-    ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
-    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-{%- endif %}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
 
-# isntall designate
-#- description: Install powerdns
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@powerdns:server' state.sls powerdns.server
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-#- description: Install designate
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@designate:server' state.sls designate -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 5, delay: 10}
-#  skip_fail: false
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
 
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
 
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set geteway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description:  Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
index cc98f46..4b04af1 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-queens-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-queens-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "glusterfs" "jenkins" "maas" "backupninja" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "auditd" "logrotate"') }}
-
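+# Assumption: passing FORMULA_SERVICES='\*' makes the macro install all available
+# salt formula packages rather than an explicit list.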
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
index 1d323ba..6cc9fd3 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
@@ -1,256 +1,26 @@
 {% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
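+# StackLight (Docker Swarm, MongoDB, Prometheus, log collection, the Ceilometer
+# collector and the monitoring containers) is deployed through the shared-sl.yaml
+# macros below.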
-# Install docker swarm
-- description: Configure docker service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
 
-- description: Install docker swarm on master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
 
-- description: Send grains to the swarm slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
 
-- description:  Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
 
-- description:  Refresh modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_PROMETEUS() }}
 
-- description:  Rerun swarm on slaves to proper token population
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
 
-- description:  Configure slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
 
-- description:  List registered Docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_COLLECT_GRAINS() }}
 
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
 
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 20}
-  skip_fail: false
-
-- description: Install telegraf
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install kibana client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check influix db
-  cmd: |
-    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
-  cmd: |
-    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
-    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-    else
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
-  cmd: |
-    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Ceilometer service presence: ${CEILO}";
-    if [[ "$CEILO" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-#Launch containers
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Install sphinx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-#- description: run docker state
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: docker ps
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_LAUNCH_CONTAINERS() }}
 
 {{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+
 {{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
index 7c1628c..f8c13d4 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
@@ -18,8 +18,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -30,9 +30,8 @@
 {% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -61,9 +60,8 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-#            default_{{ HOSTNAME_DNS01 }}: +111
-#            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -89,9 +87,8 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-#            default_{{ HOSTNAME_DNS01 }}: +111
-#            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -117,9 +114,8 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-#            default_{{ HOSTNAME_DNS01 }}: +111
-#            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -145,11 +141,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-#            default_{{ HOSTNAME_DNS01 }}: +111
-#            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
-            dhcp: [+10, -10]
+            dhcp: [+130, +220]
 
 
     groups:
@@ -711,54 +706,28 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-#          - name: {{ HOSTNAME_DNS01 }}
-#            role: salt_minion
-#            params:
-#              vcpu: !os_env SLAVE_NODE_CPU, 1
-#              memory: !os_env SLAVE_NODE_MEMORY, 2048
-#              boot:
-#                - hd
-#              cloud_init_volume_name: iso
-#              cloud_init_iface_up: ens3
-#              volumes:
-#                - name: system
-#                  capacity: !os_env NODE_VOLUME_SIZE, 150
-#                  backing_store: mcp_ubuntu_1604_image
-#                  format: qcow2
-#                - name: iso  # Volume with name 'iso' will be used
-#                             # for store image with cloud-init metadata.
-#                  capacity: 1
-#                  format: raw
-#                  device: cdrom
-#                  bus: ide
-#                  cloudinit_meta_data: *cloudinit_meta_data
-#                  cloudinit_user_data: *cloudinit_user_data_1604
-#
-#              interfaces: *all_interfaces
-#              network_config: *all_network_config
-#
-#          - name: {{ HOSTNAME_DNS02 }}
-#            role: salt_minion
-#            params:
-#              vcpu: !os_env SLAVE_NODE_CPU, 1
-#              memory: !os_env SLAVE_NODE_MEMORY, 2048
-#              boot:
-#                - hd
-#              cloud_init_volume_name: iso
-#              cloud_init_iface_up: ens3
-#              volumes:
-#                - name: system
-#                  capacity: !os_env NODE_VOLUME_SIZE, 150
-#                  backing_store: mcp_ubuntu_1604_image
-#                  format: qcow2
-#                - name: iso  # Volume with name 'iso' will be used
-#                             # for store image with cloud-init metadata.
-#                  capacity: 1
-#                  format: raw
-#                  device: cdrom
-#                  bus: ide
-#                  cloudinit_meta_data: *cloudinit_meta_data
-#                  cloudinit_user_data: *cloudinit_user_data_1604
-#
-#              interfaces: *all_interfaces
-#              network_config: *all_network_config
+          - name: {{ HOSTNAME_SHARE01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
index e6e73a5..d8f2505 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
@@ -1,4 +1,7 @@
 default_context:
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  auditd_enabled: 'True'
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
@@ -13,7 +16,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -41,6 +44,7 @@
   local_repositories: 'False'
   maas_deploy_address: 192.168.10.90
   maas_hostname: cfg01
+  maas_enabled: 'False'
   mcp_version: stable
   offline_deployment: 'False'
   opencontrail_enabled: 'False'
@@ -51,6 +55,10 @@
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.150
+  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.150
+  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.150
+  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.150
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -100,6 +108,11 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -145,7 +158,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.60
@@ -181,3 +194,18 @@
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
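+  # Feature toggles for this lab: Designate DNSaaS with the PowerDNS backend and
+  # Manila file shares using the LVM driver backed by /dev/loop1.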
+  rsync_fernet_rotation: 'True'
+  compute_padding_with_zeros: False
+  designate_backend: powerdns
+  designate_enabled: 'True'
+  nova_vnc_tls_enabled: 'False'
+  galera_ssl_enabled: 'False'
+  openstack_mysql_x509_enabled: 'False'
+  rabbitmq_ssl_enabled: 'False'
+  openstack_rabbitmq_x509_enabled: 'False'
+  tenant_telemetry_enabled: 'False'
+  gnocchi_aggregation_storage: file
+  manila_enabled: 'True'
+  manila_share_backend: 'lvm'
+  manila_lvm_volume_name: 'manila-volume'
+  manila_lvm_devices: '/dev/loop1'
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
index 4c7091b..6230f55 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.mcp11-ovs-dpdk.local:
+    cfg01.mcp-queens-ovs.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,17 +10,13 @@
         ens4:
           role: single_ctl
 
-    ctl01.mcp11-ovs-dpdk.local:
+    ctl01.mcp-queens-ovs.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
-    #  - features_designate_bind9_database
-    #  - features_designate_bind9_dns
-    #  - features_designate_bind9
-    #  - features_designate_bind9_keystone
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -28,16 +24,13 @@
         ens4:
           role: single_ctl
 
-    ctl02.mcp11-ovs-dpdk.local:
+    ctl02.mcp-queens-ovs.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-    #  - features_designate_bind9_database
-    #  - features_designate_bind9_dns
-    #  - features_designate_bind9
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,15 +38,13 @@
         ens4:
           role: single_ctl
 
-    ctl03.mcp11-ovs-dpdk.local:
+    ctl03.mcp-queens-ovs.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
       - openstack_database
       - openstack_message_queue
-    #  - features_designate_bind9_database
-    #  - features_designate_bind9
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -61,11 +52,10 @@
         ens4:
           role: single_ctl
 
-    prx01.mcp11-ovs-dpdk.local:
+    prx01.mcp-queens-ovs.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
-    #  - features_designate_bind9_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -73,7 +63,7 @@
         ens4:
           role: single_ctl
 
-    mon01.mcp11-ovs-dpdk.local:
+    mon01.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_server_node01
       roles:
       - stacklightv2_server_leader
@@ -84,7 +74,7 @@
         ens4:
           role: single_ctl
 
-    mon02.mcp11-ovs-dpdk.local:
+    mon02.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_server_node02
       roles:
       - stacklightv2_server
@@ -95,7 +85,7 @@
         ens4:
           role: single_ctl
 
-    mon03.mcp11-ovs-dpdk.local:
+    mon03.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_server_node03
       roles:
       - stacklightv2_server
@@ -106,7 +96,7 @@
         ens4:
           role: single_ctl
 
-    log01.mcp11-ovs-dpdk.local:
+    log01.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_log_node01
       roles:
       - stacklight_log_leader_v2
@@ -117,7 +107,7 @@
         ens4:
           role: single_ctl
 
-    log02.mcp11-ovs-dpdk.local:
+    log02.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_log_node02
       roles:
       - stacklight_log
@@ -128,7 +118,7 @@
         ens4:
           role: single_ctl
 
-    log03.mcp11-ovs-dpdk.local:
+    log03.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_log_node03
       roles:
       - stacklight_log
@@ -139,7 +129,7 @@
         ens4:
           role: single_ctl
 
-    mtr01.mcp11-ovs-dpdk.local:
+    mtr01.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_telemetry_node01
       roles:
       - stacklight_telemetry_leader
@@ -150,7 +140,7 @@
         ens4:
           role: single_ctl
 
-    mtr02.mcp11-ovs-dpdk.local:
+    mtr02.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_telemetry_node02
       roles:
       - stacklight_telemetry
@@ -161,7 +151,7 @@
         ens4:
           role: single_ctl
 
-    mtr03.mcp11-ovs-dpdk.local:
+    mtr03.mcp-queens-ovs.local:
       reclass_storage_name: stacklight_telemetry_node03
       roles:
       - stacklight_telemetry
@@ -173,7 +163,7 @@
           role: single_ctl
 
     # Generator-based computes. For compatibility only
-    cmp<<count>>.mcp11-ovs-dpdk.local:
+    cmp<<count>>.mcp-queens-ovs.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
@@ -188,7 +178,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.mcp11-ovs-dpdk.local:
+    gtw01.mcp-queens-ovs.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
@@ -202,3 +192,36 @@
           role: bond0_ab_ovs_vxlan_mesh
         ens6:
           role: bond1_ab_ovs_floating
+
+    dns01.mcp-queens-ovs.local:
+      reclass_storage_name: openstack_dns_node01
+      roles:
+      - openstack_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dns02.mcp-queens-ovs.local:
+      reclass_storage_name: openstack_dns_node02
+      roles:
+      - openstack_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    share01.mcp-queens-ovs.local:
+      reclass_storage_name: openstack_share_node01
+      roles:
+      - openstack_share
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
index afd0d5a..6b36603 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
@@ -1,119 +1,17 @@
 {% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
 
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
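The SHARED_CORE macros referenced above come from shared-core.yaml and are expected to wrap the same Salt steps that were previously inlined in core.yaml. As a rough, hypothetical sketch (shared-core.yaml itself is not part of this change, so its exact contents are assumed), MACRO_INSTALL_KEEPALIVED() would expand to something like:

    {%- macro MACRO_INSTALL_KEEPALIVED() %}
    # Keepalived is applied to the first cluster member before the rest,
    # mirroring the inline steps removed from this file above.
    - description: Install keepalived on the first controller
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@keepalived:cluster and *01*' state.sls keepalived
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: true

    - description: Install keepalived on the remaining cluster members
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@keepalived:cluster' state.sls keepalived
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: true
    {%- endmacro %}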
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
index 26775ce..75fd27f 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
@@ -8,129 +8,29 @@
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
 # Install OpenStack control services
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
 
-# isntall designate
-#- description: Install bind
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@bind:server' state.sls bind
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 5}
-#  skip_fail: false
-
-#- description: Install designate
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-#    -C 'I@designate:server' state.sls designate -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 5, delay: 10}
-#  skip_fail: false
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) }}
 
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
 
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
-
-- description: Create net04_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_external
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron net-create net04'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create subnet_net04
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create router
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-create net04_router01'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set geteway
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description:  Add interface
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: sync time
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-    'service ntp stop; ntpd -gq;  service ntp start'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+  nova:
+    controller:
+      policy:
+        context_is_admin: 'role:admin or role:administrator'
+        'compute:create': 'rule:admin_or_owner'
+        'compute:create:attach_network':
+  cinder:
+    controller:
+      policy:
+        'volume:delete': 'rule:admin_or_owner'
+        'volume:extend':
+  neutron:
+    server:
+      policy:
+        create_subnet: 'rule:admin_or_network_owner'
+        'get_network:queue_id': 'rule:admin_only'
+        'create_network:shared':
+  glance:
+    server:
+      policy:
+        publicize_image: "role:admin"
+        add_member:
+  keystone:
+    server:
+      policy:
+        admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+  heat:
+    server:
+      policy:
+        context_is_admin: 'role:admin and is_admin_project:True'
+        deny_stack_user: 'not role:heat_stack_user'
+        deny_everybody: '!'
+        'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+        'cloudformation:DescribeStackResources':
+  ceilometer:
+    server:
+      policy:
+        segregation: 'rule:context_is_admin'
+        'telemetry:get_resource':
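In this overrides file a key with a value replaces the corresponding rule, while a key left empty (for example 'compute:create:attach_network':) is conventionally used by the policy states to drop that rule from the generated policy file; that behaviour is an assumption about the formulas, not something shown in this change. Assuming the OVERRIDE_POLICY variable introduced in openstack.yaml is what selects this file, a minimal override for a single service would look like:

    parameters:
      nova:
        controller:
          policy:
            # non-empty value: set or override the rule
            'compute:create': 'rule:admin_or_owner'
            # empty value: remove the rule (assumed formula semantics)
            'compute:create:attach_network':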
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
index 40e2f4e..e88d8a4 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'cookied-mcp-queens-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'cookied-mcp-queens-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -14,8 +14,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
-
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
@@ -29,21 +28,3 @@
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Hack gtw node
-  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Hack cmp01 node
-  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Hack cmp02 node
-  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
index 748c05e..587cb1e 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
@@ -1,258 +1,26 @@
 {% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+{% import 'shared-sl.yaml' as SHARED_SL with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
-# Install docker swarm
-- description: Configure docker service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
 
-- description: Install docker swarm on master node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_MONGODB() }}
 
-- description: Send grains to the swarm slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
 
-- description:  Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_PROMETEUS() }}
 
-- description:  Refresh modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
 
-- description:  Rerun swarm on slaves to proper token population
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
 
-- description:  Configure slave nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_COLLECT_GRAINS() }}
 
-- description:  List registered Docker swarm nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
 
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-# Install slv2 infra
-# Install MongoDB for alerta
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Create MongoDB cluster
-- description: Install Mongo if target matches
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 20}
-  skip_fail: false
-
-- description: Install telegraf
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
-  cmd: |
-    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
-      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install kibana server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install elasticsearch client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Install kibana client
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check influix db
-  cmd: |
-    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
-  cmd: |
-    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
-    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install service for the log collection
-- description: Configure fluentd
-  cmd: |
-    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
-    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
-    else
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-#Install heka ceilometer collector
-- description: Install heka ceilometer if they exists
-  cmd: |
-    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Ceilometer service presence: ${CEILO}";
-    if [[ "$CEILO" == "true" ]]; then
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
-        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
-    fi
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-# Collect grains needed to configure the services
-
-- description: Get grains
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Sync modules
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Update mine
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 5, delay: 15}
-  skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-#Launch containers
-- description: launch prometheus containers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Check docker ps
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Install sphinx
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-#- description: run docker state
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-#
-#- description: docker ps
-#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-#  node_name: {{ HOSTNAME_CFG01 }}
-#  retry: {count: 1, delay: 10}
-#  skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
-  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 10}
-  skip_fail: false
-
-- description: Run salt minion to create cert files
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
+{{  SHARED_SL.MACRO_LAUNCH_CONTAINERS() }}
 
 {{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+
 {{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
index 0000cc9..0b7b0e8 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
@@ -18,8 +18,8 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
@@ -30,7 +30,10 @@
 {% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -59,7 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -85,7 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -111,7 +120,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -137,9 +149,12 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_SHARE01 }}: +130
           ip_ranges:
-            dhcp: [+10, -10]
+            dhcp: [+130, +220]
 
 
     groups:
@@ -699,3 +714,81 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
+
+          - name: {{ HOSTNAME_DNS01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_DNS02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_SHARE01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
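The new +111, +112 and +130 offsets reserve addresses for dns01, dns02 and share01 in every address pool, which lines up with the openstack_dns_node01_address / openstack_dns_node02_address values added to the cookiecutter context earlier in this change. On a 172.16.10.0/24 control pool, for example, the resolved addresses would be roughly:

    default_dns01:   172.16.10.111
    default_dns02:   172.16.10.112
    default_share01: 172.16.10.130   # assumed; the share node address is not pinned in the context above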
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
index a42b5f0..7185a67 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set LAB_CONFIG_NAME = 'cookied-bm-dpdk-pipeline' %}
 {% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index c727bb1..562c1b9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-baremetal-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set LAB_CONFIG_NAME = 'cookied-bm-mcp-ovs-dpdk' %}
 {% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
index 0e8095a..cf03c60 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -25,6 +25,8 @@
     # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
     reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
     reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node03_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.103 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node04_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.104 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
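reclass-tools add-key writes a scalar at the given dotted path into the target YAML file, so the two new entries extend the existing node01/node02 workaround to four compute nodes. Assuming a 10.167.4.0/24 control network (the actual prefix comes from SHARED.IPV4_NET_CONTROL_PREFIX), kubernetes/init.yml would end up carrying roughly:

    parameters:
      _param:
        kubernetes_compute_node01_address: 10.167.4.101
        kubernetes_compute_node02_address: 10.167.4.102
        kubernetes_compute_node03_address: 10.167.4.103
        kubernetes_compute_node04_address: 10.167.4.104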
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
index 776a516..7eacc05 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -25,6 +25,8 @@
     # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
     reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
     reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node03_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.103 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node04_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.104 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
index 130d95a..21349bf 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-genie.yaml
@@ -25,6 +25,8 @@
     # Workaround for compute nodes. Auto-registration for compute nodes cannot be used without external address inventory
     reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
     reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node03_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.103 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node04_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.104 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
index 22297a6..23e0968 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
 
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
index 48e019a..ecc5ac8 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
 {% set LAB_CONFIG_NAME = 'cookied-mcp-mitaka-ovs' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
index 570602d..07a7649 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set LAB_CONFIG_NAME = 'cookied-mcp-newton-dvr' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
index 318cf87..22828b9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set LAB_CONFIG_NAME = 'cookied-mcp-newton-ovs' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
index 0178514..3211797 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dop-sl2' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index 55e8c2a..ad9686f 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-dvr' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 911b935..522eba9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -1,7 +1,7 @@
 {% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-ovs' %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
index 35dc2df..988f469 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
@@ -43,6 +43,14 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Temporary workaround (to be fixed or debugged) - reduce Ceph pg_num/pgp_num to 4
+  cmd: |
+    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
 
 
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
new file mode 100644
index 0000000..e5a1623
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
@@ -0,0 +1,57 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr-ceph' %}
+# Name of the context file (without the extension, which is always .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+    # Start compute node addresses from .105, as in static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Temporary workaround (to be fixed or debugged) - reduce Ceph pg_num/pgp_num to 4
+  cmd: |
+    sed -i 's/pg_num: 128/pg_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+    sed -i 's/pgp_num: 128/pgp_num: 4/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/ceph/setup.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+
+
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
index 6abc5d0..4732a9a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
@@ -24,17 +24,7 @@
     sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-    # Start compute node addresses from .105 , as in static models
-    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # Workaround of missing reclass.system for dns role
-    # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
@@ -45,12 +35,14 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     # Add cinder volume on cmp nodes. PROD-20945
     reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
     reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
     reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
     reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    # Add loopback device for manila
+    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
index e6fc5f0..6434349 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
@@ -24,17 +24,7 @@
     sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
-    # Start compute node addresses from .105 , as in static models
-    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
-    # Bind9 services are placed on the first two ctl nodes
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
@@ -45,12 +35,14 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     # Add cinder volume on cmp nodes. PROD-20945
     reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
     reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
     reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
     reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    # Add loopback device for manila
+    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
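
For reference, a rough sketch of what these reclass-tools calls are expected to add to the generated model in both queens templates (the surrounding keys depend on the rest of the model, so treat this only as an illustration):

    # openstack/init.yml fragment (assumed result)
    parameters:
      _param:
        loopback_device_size: '20'
        loopback_device1_size: '20'    # consumed by the manila loopback storage class

    # openstack/compute/init.yml fragment (assumed result)
    classes:
    - system.cinder.volume.single
    - system.cinder.volume.backend.lvm
    - system.linux.storage.loopback
    - system.linux.storage.loopback_manila
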
diff --git a/tcp_tests/templates/k8s-ha-calico/salt.yaml b/tcp_tests/templates/k8s-ha-calico/salt.yaml
index d1ce14a..b066fc2 100644
--- a/tcp_tests/templates/k8s-ha-calico/salt.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'k8s-ha-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'k8s-ha-calico/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/k8s-ha-contrail/salt.yaml b/tcp_tests/templates/k8s-ha-contrail/salt.yaml
index 8e83628..086dc83 100644
--- a/tcp_tests/templates/k8s-ha-contrail/salt.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'k8s-ha-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'k8s-ha-contrail/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
index 017b944..3f4c1a2 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
@@ -84,7 +84,7 @@
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
    - salt-call --local  --state-output=mixed state.sls dnsmasq;
    - salt-call --local  --state-output=mixed state.sls nginx;
    ########################################################
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml b/tcp_tests/templates/shared-ceph.yaml
similarity index 64%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
rename to tcp_tests/templates/shared-ceph.yaml
index 5f2a4ae..267e407 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/ceph.yaml
+++ b/tcp_tests/templates/shared-ceph.yaml
@@ -1,12 +1,7 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-{% import 'shared-salt.yaml' as SHARED with context %}
+{# Collection of common macros shared across ceph and radosgw #}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{%- macro MACRO_INSTALL_CEPH_MONS() %}
 
-# Install ceph mons
 - description: Update grains
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@ceph:common' state.sls salt.minion.grains
@@ -41,6 +36,9 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEPH_MGR() %}
 
 - description: Install ceph mgr if defined (needed only for Luminous)
   cmd: |
@@ -50,6 +48,9 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() %}
 
 - description: Install ceph osd
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -104,68 +105,25 @@
   cmd: |
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
+{%- endmacro %}
 
-- description: Install ceph clinet
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@ceph:setup' state.sls ceph.setup
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-{%- for ssh in config.underlay.ssh %}
-  {%- set salt_roles = [] %}
-  {%- for role in ssh['roles'] %}
-    {%- if role in config.salt_deploy.salt_roles %}
-      {%- set _ = salt_roles.append(role) %}
-    {%- endif %}
-  {%- endfor %}
-
-  {%- if salt_roles %}
-- description: Restart salt-minion as workaround of PROD-16970
-  cmd: |
-    service salt-minion restart;  # For case if salt-minion was already installed
-  node_name: {{ ssh['node_name'] }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-  {%- endif %}
-{%- endfor %}
-
+{%- macro CONNECT_CEPH_TO_SERVICES() %}
 - description: Connect ceph to glance
   cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Connect ceph to cinder and nova
   cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring;
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
-
-- description: Restart cinder volume
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Restart nova-compute
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_CEPH() }}
-{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+{%- endmacro %}
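
A minimal usage sketch for the new shared Ceph macros from a lab-specific ceph.yaml (the import alias and call order are assumptions; the calling template must provide HOSTNAME_CFG01 and the usual underlay context):

    {% import 'shared-ceph.yaml' as SHARED_CEPH with context %}

    {{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
    {{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
    {{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
    {{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
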
diff --git a/tcp_tests/templates/shared-core.yaml b/tcp_tests/templates/shared-core.yaml
index 6099bc7..02f7456 100644
--- a/tcp_tests/templates/shared-core.yaml
+++ b/tcp_tests/templates/shared-core.yaml
@@ -47,6 +47,24 @@
 
 {%- endmacro %}
 
+{%- macro MACRO_INSTALL_KEEPALIVED() %}
+
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+{%- endmacro %}
+
 {%- macro MACRO_INSTALL_GLUSTERFS() %}
 
 - description: Install glusterfs
@@ -71,3 +89,101 @@
   skip_fail: false
 
 {%- endmacro %}
+
+{%- macro MACRO_INSTALL_RABBITMQ() %}
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_GALERA() %}
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_HAPROXY() %}
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_MEMCACHED() %}
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_CHECK_VIP() %}
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+{%- endmacro %}
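
Likewise, a hedged sketch of how a deployment template could chain the new core macros (alias and ordering are assumptions; each macro expands into the salt steps defined above and runs them from cfg01):

    {% import 'shared-core.yaml' as SHARED_CORE with context %}

    {{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
    {{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
    {{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
    {{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
    {{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
    {{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
    {{ SHARED_CORE.MACRO_CHECK_VIP() }}
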
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index c10ad43..cec31ae 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -3,7 +3,7 @@
 {%- macro MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) %}
   {%- if USE_ORCHESTRATE %}
 - description: |
-    Execute salt orchestration state to configure all needed 
+    Execute salt orchestration state to configure all needed
    prerequisites like creating the SSH public key, etc.
     Workaround for PROD-22488, use for PROD-22535.
   cmd: salt-run state.orchestrate keystone.orchestrate.deploy
@@ -63,7 +63,8 @@
 
 - description: Check keystone service-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack service list"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -100,7 +101,8 @@
 
 - description: Check glance image-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack image list"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -123,15 +125,16 @@
 
 - description: Check nova service-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C "I@keystone:server" cmd.run ". /root/keystonercv3; nova
-    --debug service-list"
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack compute service list"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
 - description: Check nova list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C "I@keystone:server" cmd.run ". /root/keystonercv3; nova --debug list"
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack server list"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -154,7 +157,8 @@
 
 - description: Check cinder list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+    -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+    openstack volume list"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -195,7 +199,7 @@
 
 - description: Check neutron agent-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -220,7 +224,8 @@
 
 - description: Check heat service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3;
+    openstack orchestration resource type list'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 5, delay: 10}
   skip_fail: false
@@ -242,8 +247,25 @@
   skip_fail: true
 {%- endmacro %}
 
-{%- macro MACRO_INSTALL_DESIGNATE() %}
-# Note: deploy backend for designate firstly
+{%- macro MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=false, INSTALL_BIND=false) %}
+  {%- if INSTALL_POWERDNS %}
+- description: Install powerdns
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@powerdns:server' state.sls powerdns.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+  {%- endif %}
+
+  {%- if INSTALL_BIND %}
+- description: Install bind
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@bind:server' state.sls bind
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+  {%- endif %}
+
 - description: Install designate on primary node
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C "I@designate:server and *01*" state.sls designate.server
@@ -272,7 +294,64 @@
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_MANILA() %}
-# TO DO
+- description: Install manila-api on first node
+  cmd: |
+    salt -C 'I@manila:api and *01*' state.sls manila.api;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install manila-api on other nodes
+  cmd: |
+    salt -C 'I@manila:api and not *01*' state.sls manila.api;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install manila-scheduler
+  cmd: |
+    salt -C 'I@manila:scheduler' state.sls manila.scheduler;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install manila-share
+  cmd: |
+    salt -C 'I@manila:share' state.sls manila.share;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Workaround for https://mirantis.jira.com/browse/PROD-19012 (restart apache2 on ctl nodes)
+  cmd: |
+    salt 'ctl*' cmd.run 'systemctl restart apache2'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check manila-services
+  cmd: |
+    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: Create manila share type
+  cmd: |
+    salt 'cfg01*' state.sls manila.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Create CIFS and NFS shares and check their status
+  cmd: |
+    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
+    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create NFS 1 --share-type=default';
+    sleep 5;
+    salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_OCTAVIA_API() %}
@@ -291,7 +370,7 @@
 # TO DO
 {%- endmacro %}
 
-{%- macro MACRO_INSTALL_COMPUTE() %}
+{%- macro MACRO_INSTALL_COMPUTE(CELL_MAPPING=false) %}
 # Install compute node
 - description: Apply formulas for compute node
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
@@ -311,4 +390,34 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 10, delay: 30}
   skip_fail: false
+
+  {%- if CELL_MAPPING %}
+- description: Re-run nova.controller state to apply the cell mapping
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C "I@nova:controller and *01*" state.sls nova.controller
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+  {%- endif %}
+
 {%- endmacro %}
+
+{%- macro OVERRIDE_POLICY() %}
+- description: Upload policy override
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: overrides-policy.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+  cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+  cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+    ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+{%- endmacro %}
\ No newline at end of file
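
A sketch of how the parameterized OpenStack macros could be invoked; the flag values below are examples rather than defaults taken from any particular lab:

    {% import 'shared-openstack.yaml' as SHARED_OS with context %}

    {{ SHARED_OS.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=true) }}
    {{ SHARED_OS.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
    {{ SHARED_OS.MACRO_INSTALL_MANILA() }}
    {{ SHARED_OS.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
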
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index aaaf9d4..1f4ced5 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -5,11 +5,11 @@
 {# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #}
 {% set SALT_MODELS_REF_CHANGE = os_env('SALT_MODELS_REF_CHANGE', '') %}
 {# Pin to a specified commit in salt-models/reclass-system #}
-{% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/reclass-system') %}
+{% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/reclass-system') %}
 {% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
 {% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
 {% set SALT_MODELS_SYSTEM_TAG = os_env('SALT_MODELS_SYSTEM_TAG','') %}
-{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates') %}
 {% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
 {% set COOKIECUTTER_TAG = os_env('COOKIECUTTER_TAG','') %}
 {% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
@@ -51,7 +51,7 @@
 {% set SALT_FORMULAS_REFS = os_env('SALT_FORMULAS_REFS', '') %}
 {% set TEMPEST_PATTERN = os_env('TEMPEST_PATTERN', 'tempest') %}
 {% set EXCLUDE_TEST_ARGS = os_env('EXCLUDE_TEST_ARGS', '') %}
-{% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.net/salt-formulas') %}
+{% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.com/salt-formulas') %}
 
 # Needed for using different models in different templates
 {% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
@@ -241,16 +241,16 @@
     git checkout {{ SALT_MODELS_COMMIT }};
     {%- endif %}
 
-    {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
-    pushd classes/system/;
-    git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
-    popd;
-    {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
+    {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
     pushd classes/system/ && \
     {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
     git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
     {%- endfor %}
     popd;
+    {%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
+    pushd classes/system/;
+    git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
+    popd;
     {%- elif SALT_MODELS_SYSTEM_TAG != '' %}
     pushd classes/system/;
     git fetch --all --tags --prune
@@ -345,18 +345,18 @@
       {%- if CLUSTER_PRODUCT_MODELS != '' %}
         echo "CLUSTER_PRODUCT_MODELS={{ CLUSTER_PRODUCT_MODELS }}"
       {%- endif %}
-      {%- if COOKIECUTTER_TEMPLATE_COMMIT != '' %}
-        echo "COOKIECUTTER_TEMPLATE_COMMIT={{ COOKIECUTTER_TEMPLATE_COMMIT }}"
-      {%- elif COOKIECUTTER_REF_CHANGE != '' %}
+      {%- if COOKIECUTTER_REF_CHANGE != '' %}
         echo "COOKIECUTTER_REF_CHANGE={{ COOKIECUTTER_REF_CHANGE }}"
+      {%- elif COOKIECUTTER_TEMPLATE_COMMIT != '' %}
+        echo "COOKIECUTTER_TEMPLATE_COMMIT={{ COOKIECUTTER_TEMPLATE_COMMIT }}"
       {%- elif COOKIECUTTER_TAG != '' %}
         echo "COOKIECUTTER_TAG={{ COOKIECUTTER_TAG }}"
       {%- endif %}
       echo "SALT_MODELS_SYSTEM_REPOSITORY={{ SALT_MODELS_SYSTEM_REPOSITORY }}"
-      {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
-        echo "SALT_MODELS_SYSTEM_COMMIT={{ SALT_MODELS_SYSTEM_COMMIT }}"
-      {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
+      {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
         echo "SALT_MODELS_SYSTEM_REF_CHANGE={{ SALT_MODELS_SYSTEM_REF_CHANGE }}"
+      {%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
+        echo "SALT_MODELS_SYSTEM_COMMIT={{ SALT_MODELS_SYSTEM_COMMIT }}"
       {%- elif SALT_MODELS_SYSTEM_TAG != '' %}
         echo "SALT_MODELS_SYSTEM_TAG={{ SALT_MODELS_SYSTEM_TAG }}"
       {%- endif %}
@@ -373,14 +373,14 @@
     pip install cookiecutter
     export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
 
-    {%- if COOKIECUTTER_TEMPLATE_COMMIT != '' %}
-    pushd /root/cookiecutter-templates
-    git checkout {{ COOKIECUTTER_TEMPLATE_COMMIT }}
-    popd
-    {%- elif COOKIECUTTER_REF_CHANGE != '' %}
+    {%- if COOKIECUTTER_REF_CHANGE != '' %}
     pushd /root/cookiecutter-templates
     git fetch {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} {{ COOKIECUTTER_REF_CHANGE }} && git checkout FETCH_HEAD
     popd
+    {%- elif COOKIECUTTER_TEMPLATE_COMMIT != '' %}
+    pushd /root/cookiecutter-templates
+    git checkout {{ COOKIECUTTER_TEMPLATE_COMMIT }}
+    popd
     {%- elif COOKIECUTTER_TAG != '' %}
     pushd /root/cookiecutter-templates
     git fetch --all --tags --prune
@@ -447,16 +447,16 @@
   cmd: |
     set -e;
     set -x;
-    {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
-    pushd /srv/salt/reclass/classes/system/
-    git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
-    popd;
-    {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
+    {%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
     pushd /srv/salt/reclass/classes/system/ && \
     {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
     git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
     {%- endfor %}
     popd;
+    {%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
+    pushd /srv/salt/reclass/classes/system/
+    git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
+    popd;
     {%- elif SALT_MODELS_SYSTEM_TAG != '' %}
     pushd /srv/salt/reclass/classes/system/
     git fetch --all --tags --prune
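
The reordered conditionals above change the precedence when several pinning variables are exported at once: a Gerrit ref change now wins over an exact commit, which in turn wins over a tag. Roughly, under that assumption (values are illustrative):

    # Ref change beats commit beats tag after this change
    export SALT_MODELS_SYSTEM_REF_CHANGE=refs/changes/19/7219/12   # checked first now
    export SALT_MODELS_SYSTEM_COMMIT=<commit-sha>                  # used only when no ref change is set
    export COOKIECUTTER_REF_CHANGE=<gerrit-ref>                    # same ordering applies to cookiecutter-templates
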
diff --git a/tcp_tests/templates/shared-sl.yaml b/tcp_tests/templates/shared-sl.yaml
new file mode 100644
index 0000000..d41d1c1
--- /dev/null
+++ b/tcp_tests/templates/shared-sl.yaml
@@ -0,0 +1,252 @@
+{# Collection of StackLight (sl) macros #}
+
+{%- macro MACRO_INSTALL_DOCKER_SWARM() %}
+# Install docker swarm
+- description: Configure docker service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Refresh modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun swarm on slaves to ensure proper token population
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: List registered Docker swarm nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_MONGODB() %}
+# Install StackLight v2 (slv2) infra
+# Install MongoDB for Alerta
+- description: Install Mongo if target matches
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_MONGODB_CLUSTER() %}
+# Create MongoDB cluster
+- description: Install Mongo if target matches
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 20}
+  skip_fail: false
+
+- description: Install telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check InfluxDB
+  cmd: |
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_PROMETEUS() %}
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+  cmd: |
+    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_LOG_COLLECTION() %}
+# Install service for the log collection
+- description: Configure fluentd
+  cmd: |
+    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+    else
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_CEILOMETER_COLLECTOR() %}
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+  cmd: |
+    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Ceilometer service presence: ${CEILO}";
+    if [[ "$CEILO" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_COLLECT_GRAINS() %}
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 15}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_CONFIGURE_SERVICES() %}
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_LAUNCH_CONTAINERS() %}
+# Launch containers
+- description: Launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Install sphinx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endmacro %}
\ No newline at end of file
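
And a corresponding sketch for the new StackLight macros (the alias is an assumption; MACRO_INSTALL_PROMETEUS keeps the name exactly as defined above):

    {% import 'shared-sl.yaml' as SHARED_SL with context %}

    {{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
    {{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
    {{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
    {{ SHARED_SL.MACRO_INSTALL_PROMETEUS() }}
    {{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
    {{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
    {{ SHARED_SL.MACRO_COLLECT_GRAINS() }}
    {{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
    {{ SHARED_SL.MACRO_LAUNCH_CONTAINERS() }}
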
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
index 69772ac..9710531 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
    - salt-call --local  --state-output=mixed state.sls dnsmasq;
    - salt-call --local  --state-output=mixed state.sls nginx;
    ########################################################
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 505282d..97ea7c9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
index c73cfed..e7e32a6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'virtual-mcp-ocata-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-ocata-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-dvr/overrides.yml') %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
index 16af75a..350be48 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-ocata-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index eedb7d9..ef5925c 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -145,7 +145,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   fluentd_enabled: 'True'
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
index 6bac1ea..3a3ed3a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'virtual-mcp-ocata-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-ocata-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/environment/virtual-mcp-ocata-ovs/overrides.yml') %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
index cf418f5..89c3882 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
index e2ba165..9e0598d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -116,7 +116,7 @@
   salt_master_address: 172.16.10.90
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
-  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 172.16.10.70
   stacklight_log_hostname: mon
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
index 00577bf..bcbfec4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
@@ -5,7 +5,7 @@
 {% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 {% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
 # Other salt model repository parameters see in shared-salt.yaml
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
index 937987b..bb4c5e4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-dvr-ssl-barbican/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
index 3d66dcd..6729010 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
@@ -5,7 +5,7 @@
 {% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index 4beeb41..e027b64 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
index 0ffdab0..684c535 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
index 4efcbd3..292d022 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/salt.yaml
@@ -6,7 +6,7 @@
 {% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import DOMAIN_NAME with context %}
 {% from 'virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay.yaml' import HOSTNAME_VSWITCH with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 00da72e..04a3e30 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index a15946f..2bf4bb3 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-sl-os/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-sl-os/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set OVERRIDES_FILENAME = os_env('OVERRIDES_FILENAME', '/srv/salt/reclass/classes/cluster/overrides.yml') %}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
index 67695d9..7213162 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp-trusty/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp-trusty/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 {% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
 {% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
index a62f1cd..b557d3a 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-dvr/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
index 9f5e5c3..d16a126 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-k8s-calico-minimal/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
index 198c0f8..36a0228 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% set ENABLE_COMPUTES_SELF_REGISTER = os_env('ENABLE_COMPUTES_SELF_REGISTER', '') %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
index 557f5dc..d0844bc 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
index 1deeb7d..27999e1 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
index eac93fc..46c02a0 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs.new/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
index 8ec1740..cfa0272 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-mcp11-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-mcp11-ovs/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
new file mode 100644
index 0000000..9f28ba9
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+# Create OpenStack resources after the control plane is deployed
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+
+  # Upload cirros image
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Enable local docker repo
+  cmd: |
+    set -e;
+    echo "{{ DOCKER_LOCAL_REPO }}" > /etc/apt/sources.list.d/mcp_docker.list;
+    apt-get clean; apt-get update;
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install docker-ce on gtw
+  cmd: salt-call cmd.run 'apt-get install docker-ce -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Enable forward policy on gtw
+  cmd: |
+    set -e;
+    iptables --policy FORWARD ACCEPT;
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
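
The new post_openstack.yaml follows the step schema used by the other templates in this patch: each entry carries description, cmd, node_name, retry {count, delay} and skip_fail. A hedged sketch of how such a step list could be driven, assuming a run_cmd(node_name, cmd) callable; the real execution is handled by the tcp-qa managers, not by this snippet:

    # Hypothetical runner for the step schema shown above.
    import time

    def execute_steps(steps, run_cmd):
        for step in steps:
            retry = step.get('retry', {'count': 1, 'delay': 0})
            last_error = None
            for _ in range(retry['count']):
                try:
                    run_cmd(step['node_name'], step['cmd'])
                    last_error = None
                    break
                except Exception as err:  # broad catch only for this sketch
                    last_error = err
                    time.sleep(retry['delay'])
            if last_error is not None and not step.get('skip_fail', False):
                raise RuntimeError("Step '{0}' failed: {1}".format(
                    step['description'], last_error))
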
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index 921cd7b..d671337 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -3,6 +3,9 @@
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID02 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID03 with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
@@ -43,6 +46,9 @@
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID02) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID03) }}
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
 {{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
@@ -65,7 +71,7 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila" "jenkins" "glusterfs"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -88,15 +94,60 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Install watchdog
-  cmd: salt -C "I@watchdog:server" state.sls watchdog;
+#- description: Install watchdog
+#  cmd: salt -C "I@watchdog:server" state.sls watchdog;
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+- description: WR for correct access to git repo from jenkins on cfg01 node
+  cmd: |
+    export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mk/mk-pipelines /home/repo/mk/mk-pipelines/;
+    export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mcp-ci/pipeline-library /home/repo/mcp-ci/pipeline-library/;
+    chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+    chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: '*Workaround* to remove apt key until migration to CC'
+  cmd: salt-key -d apt01.virtual-offline-pike-ovs-dpdk  -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
+
+- description: '*Workaround* stop minion on apt like proxy node'
+  cmd: systemctl stop salt-minion.service
+  node_name: {{ HOSTNAME_APT01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
+
 - description: Workaround to avoid reboot cmp nodes bring OVS interfaces UP
   cmd: |
     salt 'cmp*' cmd.run "ifup br-mesh";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
-  skip_fail: false
\ No newline at end of file
+  skip_fail: false
+
+- description: Temporary WR to add cfg01 host key to jenkins known_hosts
+  cmd: |
+    ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Enable Jenkins
+  cmd: |
+    systemctl enable jenkins || true;
+    systemctl restart jenkins || true;
+    sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: run jenkins.client
+  cmd: |
+    salt-call state.sls jenkins.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 60}
+  skip_fail: false
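
The "Enable Jenkins" and "run jenkins.client" steps above depend on Jenkins on cfg01 becoming reachable within the retry budget (3 retries with a 60 s delay). A hedged sketch of an explicit readiness poll that could sit between those two steps; the URL, port and timeout below are assumptions, not values taken from the reclass model:

    # Hypothetical readiness poll for the cfg01 Jenkins before jenkins.client.
    import time
    import requests

    def wait_jenkins_ready(url='http://localhost:8081', timeout=300, interval=10):
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                # Jenkins answers /api/json once it has finished starting up.
                if requests.get(url + '/api/json', timeout=5).status_code in (200, 403):
                    return True
            except requests.RequestException:
                pass
            time.sleep(interval)
        return False
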
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
index 7297a41..fe2c8f3 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
    - salt-call --local  --state-output=mixed state.sls dnsmasq;
    - salt-call --local  --state-output=mixed state.sls nginx;
    ########################################################
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
index ab4dbe5..15da576 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
@@ -19,6 +19,9 @@
 {% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
 {% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
@@ -45,6 +48,10 @@
             l2_network_device: +1
             default_{{ HOSTNAME_APT01 }}: +122
             default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CID }}: +80
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
             default_{{ HOSTNAME_CTL01 }}: +101
             default_{{ HOSTNAME_CTL02 }}: +102
             default_{{ HOSTNAME_CTL03 }}: +103
@@ -59,7 +66,7 @@
             default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_VS }}: +178
           ip_ranges:
-            dhcp: [+90, -10]
+            dhcp: [+60, -10]
 
       admin-pool01:
         net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -69,6 +76,10 @@
             l2_network_device: +1
             default_{{ HOSTNAME_APT01 }}: +122
             default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CID }}: +80
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
             default_{{ HOSTNAME_CTL01 }}: +101
             default_{{ HOSTNAME_CTL02 }}: +102
             default_{{ HOSTNAME_CTL03 }}: +103
@@ -83,7 +94,7 @@
             default_{{ HOSTNAME_DNS02 }}: +112
             default_{{ HOSTNAME_VS }}: +178
           ip_ranges:
-            dhcp: [+90, -10]
+            dhcp: [+60, -10]
 
       tenant-pool01:
         net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -666,3 +677,81 @@
 
               interfaces: *interfaces
               network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
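
The cid01..cid03 entries above use the same relative-offset notation as the rest of the pools: +91/+92/+93 are host offsets inside each rendered /24 network, and the narrowed dhcp range [+60, -10] spans offset 60 up to ten addresses below the broadcast address. A small sketch of how those offsets resolve, assuming one concrete 10.70.0.0/24 network (the actual carving of the /16 pools is done by fuel-devops):

    # Sketch of resolving the +N / -N offsets used in the address pools above.
    import ipaddress

    def resolve_offset(network, offset):
        """Positive offsets count from the network address, negative from broadcast."""
        if offset >= 0:
            return network.network_address + offset
        return network.broadcast_address + offset

    net = ipaddress.ip_network(u'10.70.0.0/24')
    print(resolve_offset(net, 91))    # cid01 -> 10.70.0.91 in this example network
    print(resolve_offset(net, -10))   # end of the dhcp range -> 10.70.0.245
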
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
index 7297a41..fe2c8f3 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
    - salt-call --local  --state-output=mixed state.sls dnsmasq;
    - salt-call --local  --state-output=mixed state.sls nginx;
    ########################################################
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
index 7297a41..fe2c8f3 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-apt01.yaml
@@ -83,7 +83,7 @@
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
-   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+   - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.com') }}/g" {} +
    - salt-call --local  --state-output=mixed state.sls dnsmasq;
    - salt-call --local  --state-output=mixed state.sls nginx;
    ########################################################
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index c72c3bb..81d958d 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -13,7 +13,7 @@
   control_vlan: '10'
   cookiecutter_template_branch: master
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 192.168.10.1
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: 192.168.10.0/24
@@ -144,7 +144,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 192.168.10.90
   shared_reclass_branch: master
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'False'
   stacklight_version: '2'
   static_ips_on_deploy_network_enabled: 'False'
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index de035c2..bcbcd75 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -2,7 +2,7 @@
 {% from 'virtual-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
 
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
 {% import 'shared-salt.yaml' as SHARED with context %}
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate" "jenkins"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/tests/system/test_3rdparty_suites.py b/tcp_tests/tests/system/test_3rdparty_suites.py
new file mode 100644
index 0000000..d545532
--- /dev/null
+++ b/tcp_tests/tests/system/test_3rdparty_suites.py
@@ -0,0 +1,93 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+class Test3rdpartySuites(object):
+    """Test class for running 3rdparty test suites
+
+    Requires environment variables:
+      ENV_NAME
+      LAB_CONFIG_NAME
+      TESTS_CONFIGS
+    """
+
+    @pytest.mark.grab_versions
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.run_tempest
+    def test_run_tempest(self, tempest_actions, show_step, _):
+        """Runner for Juniper contrail-tests
+
+        Scenario:
+            1. Run tempest
+        """
+        show_step(1)
+        tempest_actions.prepare_and_run_tempest()
+
+    @pytest.mark.grab_versions
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.run_stacklight
+    def test_run_stacklight(self, sl_actions, show_step, _):
+        """Runner for Stacklight tests
+
+        Scenario:
+            1. Run SL test
+        """
+
+        # Run SL component tests
+        show_step(1)
+        sl_actions.setup_sl_functional_tests(
+                'cfg01',
+        )
+        sl_actions.run_sl_functional_tests(
+                'cfg01',
+                '/root/stacklight-pytest/stacklight_tests/',
+                'tests/prometheus',
+                'test_alerts.py',
+                junit_report_name='stacklight_report.xml')
+        # Download report
+        sl_actions.download_sl_test_report(
+                'cfg01',
+                '/root/stacklight-pytest/stacklight_tests/'
+                'stacklight_report.xml')
+
+    @pytest.mark.grab_versions
+    @pytest.mark.extract(container_system='docker', extract_from='conformance',
+                         files_to_extract=['report'])
+    @pytest.mark.merge_xunit(path='/root/report',
+                             output='/root/conformance_result.xml')
+    @pytest.mark.grab_k8s_results(name=['k8s_conformance.log',
+                                        'conformance_result.xml'])
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.k8s_conformance
+    def test_run_k8s_conformance(self, show_step, config, k8s_actions,
+                                 k8s_logs, _):
+        """Test run of k8s conformance tests"""
+        k8s_actions.run_conformance()
+
+    @pytest.mark.grab_versions
+    @pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
+                                        'report.xml'])
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.k8s_conformance_virtlet
+    def test_run_k8s_conformance_virtlet(self, show_step, config, k8s_actions,
+                                         k8s_logs, _):
+        """Test run of k8s virtlet conformance tests"""
+        k8s_actions.run_virtlet_conformance()
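
Each runner in Test3rdpartySuites is selected by its own pytest marker (run_tempest, run_stacklight, k8s_conformance, k8s_conformance_virtlet), so a CI job only needs to pass the matching -m expression on top of the required environment variables. A hedged usage sketch; the ENV_NAME and LAB_CONFIG_NAME values are placeholders:

    # Hypothetical invocation of one runner from the suite above; the
    # environment variable values are placeholders for a real deployment.
    import os
    import pytest

    os.environ.setdefault('ENV_NAME', 'my-env')
    os.environ.setdefault('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr')

    # Equivalent to: py.test -m run_tempest tcp_tests/tests/system/test_3rdparty_suites.py
    pytest.main(['-m', 'run_tempest',
                 'tcp_tests/tests/system/test_3rdparty_suites.py'])
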
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 69d8324..56efe59 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -16,6 +16,7 @@
 
 from tcp_tests import logger
 from tcp_tests import settings
+from tcp_tests.managers.jenkins.client import JenkinsClient
 
 LOG = logger.logger
 
@@ -280,6 +281,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.offline_dpdk
     def test_mcp_dpdk_ovs_install(self, underlay,
                                   openstack_deployed,
                                   openstack_actions,
@@ -299,3 +301,103 @@
             tempest_actions.prepare_and_run_tempest(dpdk=True)
 
         LOG.info("*************** DONE **************")
+
+    @pytest.mark.fail_snapshot
+    @pytest.mark.offline_dpdk
+    def test_pipeline_deploy_os_dpdk(self, show_step,
+                                     underlay, config, salt_deployed,
+                                     tempest_actions,
+                                     openstack_actions):
+        """Deploy cid, deploys os with pipelines
+
+        Scenario:
+            1. Prepare salt on hosts.
+            2. Connect to jenkins on cfg01 node
+            3. Run deploy on cfg01 node
+            4. Connect to jenkins on cid node
+            5. Run deploy DT on cid node
+            6. Run deploy of os with DT
+        """
+        show_step(1)
+        nodes = underlay.node_names()
+        LOG.info("Nodes - {}".format(nodes))
+        show_step(2)
+        cfg_node_name = underlay.get_target_node_names(
+            target='cfg')[0]
+        salt_api = salt_deployed.get_pillar(
+            cfg_node_name, '_param:jenkins_salt_api_url')
+        salt_api = salt_api[0].get(cfg_node_name)
+        jenkins = JenkinsClient(
+            host='http://{}:8081'.format(config.salt.salt_master_host),
+            username='admin',
+            password='r00tme')
+        params = jenkins.make_defults_params('deploy_openstack')
+        params['SALT_MASTER_URL'] = salt_api
+        params['STACK_INSTALL'] = 'core,cicd'
+
+        show_step(3)
+        build = jenkins.run_build('deploy_openstack', params)
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 4)
+        result = jenkins.build_info(name=build[0],
+                                    build_id=build[1])['result']
+        assert result == 'SUCCESS', \
+            "Deploy core, cid failed {0}{1}".format(
+                jenkins.build_info(name=build[0], build_id=build[1]), result)
+
+        show_step(4)
+        cid_node = underlay.get_target_node_names(
+            target='cid01')[0]
+        salt_output = salt_deployed.get_pillar(
+            cid_node, 'jenkins:client:master:password')
+        cid_passwd = salt_output[0].get(cid_node)
+
+        pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
+        addresses = salt_deployed.get_pillar('cid01*', pillar)
+        ip = list(set([ip
+                  for item in addresses
+                  for node, ip in item.items() if ip]))
+        LOG.info('Jenkins ip is {}'.format(ip))
+        try:
+            assert len(ip) > 0, 'failed to find jenkins ip'
+        except AssertionError:
+            salt_deployed._salt.local(
+                tgt='cid*', fun='cmd.run',
+                args='service keepalived restart')
+            addresses = salt_deployed.get_pillar('cid01*', pillar)
+            ip = list(set([ip
+                      for item in addresses
+                      for node, ip in item.items() if ip]))
+            LOG.info('Jenkins ip is {}'.format(ip))
+            assert len(ip) > 0, 'failed to find jenkins ip {}'.format(addresses)
+
+        jenkins = JenkinsClient(
+            host='http://{}:8081'.format(ip[0]),
+            username='admin',
+            password=cid_passwd)
+        params['STACK_INSTALL'] = 'ovs,openstack'
+        params['SALT_MASTER_URL'] = 'http://{}:6969'.format(
+            config.salt.salt_master_host)
+        show_step(5)
+        build = jenkins.run_build('deploy_openstack', params)
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 4)
+        result = jenkins.build_info(name=build[0],
+                                    build_id=build[1])['result']
+        assert result == 'SUCCESS',\
+            "Deploy openstack was failed with results {0} {1}".format(
+                jenkins.build_info(name=build[0], build_id=build[1]),
+                result)
+
+        # Prepare resources before test
+        steps_path = config.openstack_deploy.openstack_resources_steps_path
+        commands = underlay.read_template(steps_path)
+        openstack_actions.install(commands)
+
+        if settings.RUN_TEMPEST:
+            tempest_actions.prepare_and_run_tempest()
+        LOG.info("*************** DONE **************")
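
test_pipeline_deploy_os_dpdk runs the same run_build / wait_end_of_build / build_info / assert sequence twice, first against the cfg01 Jenkins and then against the cid Jenkins. A hedged sketch of factoring that sequence into a helper, using only the JenkinsClient calls already present in this patch:

    # Sketch of the repeated "run job and assert SUCCESS" sequence as a helper;
    # it relies only on the JenkinsClient methods used in the test above.
    def run_job_and_assert(jenkins, job_name, params, timeout=60 * 60 * 4):
        build = jenkins.run_build(job_name, params)
        jenkins.wait_end_of_build(name=build[0], build_id=build[1],
                                  timeout=timeout)
        info = jenkins.build_info(name=build[0], build_id=build[1])
        assert info['result'] == 'SUCCESS', \
            "Job {0} failed: {1}".format(job_name, info)
        return info
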
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
index 58ad9c7..ce56f7f 100644
--- a/tcp_tests/tests/system/test_install_mcp_sl_os.py
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -69,6 +69,28 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    def test_mcp_os_newton_install(self, underlay, openstack_deployed,
+                                   openstack_actions):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN,
+                                          conf_name='lvm_mcp_newton.conf')
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
     def test_mcp_sl_os_install(self, underlay, config, openstack_deployed,
                                stacklight_deployed, openstack_actions):
         """Test for deploying an mcp environment and check it
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index b75d59c..2fddde6 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -15,6 +15,8 @@
 import pytest
 import netaddr
 import os
+import json
+import requests
 
 from tcp_tests import logger
 from tcp_tests import settings
@@ -27,6 +29,10 @@
 class TestMCPK8sActions(object):
     """Test class for different k8s actions"""
 
+    def __read_testdata_yaml(self, name):
+        data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+        return read_yaml_file(data_dir, name)
+
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     @pytest.mark.cz8116
@@ -191,6 +197,7 @@
 
     @pytest.mark.grap_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.k8s_genie
     def test_k8s_genie_flannel(self, show_step, config,
                                salt_deployed, k8s_deployed):
         """Test genie-cni+flannel cni setup
@@ -237,9 +244,8 @@
             namespace="kube-system", name_prefix="kube-flannel-") > 0
 
         show_step(3)
-        data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
         flannel_pod = k8s_deployed.api.pods.create(
-            body=read_yaml_file(data_dir, 'pod-sample-flannel.yaml'))
+            body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
         flannel_pod.wait_running()
 
         show_step(4)
@@ -249,7 +255,7 @@
 
         show_step(5)
         calico_pod = k8s_deployed.api.pods.create(
-            body=read_yaml_file(data_dir, 'pod-sample-calico.yaml'))
+            body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
         calico_pod.wait_running()
 
         show_step(6)
@@ -259,7 +265,7 @@
 
         show_step(7)
         multicni_pod = k8s_deployed.api.pods.create(
-            body=read_yaml_file(data_dir, 'pod-sample-multicni.yaml'))
+            body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
         multicni_pod.wait_running()
 
         show_step(8)
@@ -272,7 +278,7 @@
 
         show_step(9)
         nocni_pod = k8s_deployed.api.pods.create(
-            body=read_yaml_file(data_dir, 'pod-sample.yaml'))
+            body=self.__read_testdata_yaml('pod-sample.yaml'))
         nocni_pod.wait_running()
 
         show_step(10)
@@ -307,3 +313,149 @@
         calico_pod.delete()
         multicni_pod.delete()
         nocni_pod.delete()
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_k8s_dashboard(self, show_step, config,
+                           salt_deployed, k8s_deployed):
+        """Test dashboard setup
+
+        Scenario:
+            1. Setup Kubernetes cluster
+            2. Try to curl login status api
+            3. Create a test-admin-user account
+            4. Try to login in dashboard using test-admin-user account
+            5. Get and check list of namespaces using dashboard api
+        """
+        show_step(1)
+
+        show_step(2)
+        system_ns = 'kube-system'
+        dashboard_service = \
+            k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
+        dashboard_url = 'https://{}'.format(dashboard_service.get_ip())
+
+        def dashboard_curl(url, data=None, headers=None):
+            """ Using curl command on controller node. Alternatives:
+                - connect_{get,post}_namespaced_service_proxy_with_path -
+                    k8s lib does not provide way to pass headers or POST data
+                - raw rest k8s api - need to auth somehow
+                - new load-balancer svc for dashboard + requests python lib -
+                    requires working metallb or other load-balancer
+            """
+            args = ['--insecure']
+            for name in headers or {}:
+                args.append('--header')
+                args.append("{0}: {1}".format(name, headers[name]))
+            if data is not None:
+                args.append('--data')
+                args.append(data)
+            return ''.join(k8s_deployed.curl(dashboard_url + url, *args))
+
+        assert 'tokenPresent' in \
+            json.loads(dashboard_curl('/api/v1/login/status'))
+
+        show_step(3)
+        account = k8s_deployed.api.serviceaccounts.create(
+            namespace=system_ns,
+            body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
+        account.wait_secret_generation()
+
+        k8s_deployed.api.clusterrolebindings.create(
+            body=self.__read_testdata_yaml(
+                'test-admin-user-cluster-role-bind.yaml'))
+
+        account_secret = account.read().secrets[0]
+        account_token = k8s_deployed.api.secrets.get(
+            namespace=system_ns, name=account_secret.name).read().data['token']
+
+        show_step(4)
+        csrf_token = \
+            json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
+        login_headers = {'X-CSRF-TOKEN': csrf_token,
+                         'Content-Type': 'application/json'}
+        jwe_token = json.loads(dashboard_curl(
+            '/api/v1/login', headers=login_headers,
+            data=json.dumps({'token': account_token.decode('base64')})
+        ))['jweToken']
+        headers = {'jweToken': jwe_token}
+
+        show_step(5)
+        dashboard_namespaces = json.loads(
+            dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']
+
+        namespaces_names_list = \
+            [ns.name for ns in k8s_deployed.api.namespaces.list()]
+        for namespace in dashboard_namespaces:
+            assert namespace['objectMeta']['name'] in namespaces_names_list
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_k8s_ingress_nginx(self, show_step, config,
+                               salt_deployed, k8s_deployed):
+        """Test ingress-nginx configured and working with metallb
+
+        Scenario:
+            1. Setup Kubernetes cluster with metallb
+            2. Create 2 example deployments and expose them
+            3. Create ingress controller with 2 backends to each deployment
+            service respectively
+            4. Wait ingress for deploy
+            5. Try to reach default endpoint
+            6. Try to reach test1 and test2 deployment services endpoints
+        """
+        show_step(1)
+        if not config.k8s_deploy.kubernetes_metallb_enabled:
+            pytest.skip("Test requires metallb addon enabled")
+        if not config.k8s_deploy.kubernetes_ingressnginx_enabled:
+            pytest.skip("Test requires ingress-nginx addon enabled")
+
+        show_step(2)
+        image = 'nginxdemos/hello:plain-text'
+        port = 80
+        dep1 = k8s_deployed.run_sample_deployment(
+            'dep-ingress-1', image=image, port=port)
+        dep2 = k8s_deployed.run_sample_deployment(
+            'dep-ingress-2', image=image, port=port)
+        svc1 = dep1.wait_ready().expose()
+        svc2 = dep2.wait_ready().expose()
+
+        show_step(3)
+        body = {
+            'apiVersion': 'extensions/v1beta1',
+            'kind': 'Ingress',
+            'metadata': {'name': 'ingress-test'},
+            'spec': {
+                'rules': [{'http': {
+                    'paths': [{
+                        'backend': {
+                            'serviceName': svc1.name,
+                            'servicePort': port},
+                        'path': '/test1'}, {
+                        'backend': {
+                            'serviceName': svc2.name,
+                            'servicePort': port},
+                        'path': '/test2'
+                    }]
+                }}]
+            }
+        }
+        ingress = k8s_deployed.api.ingresses.create(body=body)
+
+        show_step(4)
+        ingress.wait_ready()
+
+        show_step(5)
+        ingress_address = "https://{}".format(
+            ingress.read().status.load_balancer.ingress[0].ip)
+
+        assert requests.get(ingress_address, verify=False).status_code == 404
+
+        show_step(6)
+        req1 = requests.get(ingress_address + "/test1", verify=False)
+        assert req1.status_code == 200
+        assert 'dep-ingress-1' in req1.text
+
+        req2 = requests.get(ingress_address + "/test2", verify=False)
+        assert req2.status_code == 200
+        assert 'dep-ingress-2' in req2.text
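
The Ingress body in test_k8s_ingress_nginx maps /test1 and /test2 to the two exposed sample services. A hedged sketch of building the same extensions/v1beta1 body from a path-to-service mapping; the helper name is hypothetical and produces the same structure as the literal dict above:

    # Hypothetical helper producing the same extensions/v1beta1 Ingress body
    # as the literal dict in test_k8s_ingress_nginx.
    def make_ingress_body(name, path_to_service, port):
        paths = [{'path': path,
                  'backend': {'serviceName': service_name,
                              'servicePort': port}}
                 for path, service_name in sorted(path_to_service.items())]
        return {
            'apiVersion': 'extensions/v1beta1',
            'kind': 'Ingress',
            'metadata': {'name': name},
            'spec': {'rules': [{'http': {'paths': paths}}]},
        }

    body = make_ingress_body('ingress-test',
                             {'/test1': 'dep-ingress-1',
                              '/test2': 'dep-ingress-2'}, 80)
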
diff --git a/tcp_tests/tests/system/testdata/k8s/test-admin-user-account.yaml b/tcp_tests/tests/system/testdata/k8s/test-admin-user-account.yaml
new file mode 100644
index 0000000..889fa48
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/test-admin-user-account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: test-admin-user
+  namespace: kube-system
diff --git a/tcp_tests/tests/system/testdata/k8s/test-admin-user-cluster-role-bind.yaml b/tcp_tests/tests/system/testdata/k8s/test-admin-user-cluster-role-bind.yaml
new file mode 100644
index 0000000..cad78ce
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/test-admin-user-cluster-role-bind.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: test-admin-user
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: test-admin-user
+  namespace: kube-system
diff --git a/tcp_tests/utils/env_k8s b/tcp_tests/utils/env_k8s
index 01e20fd..39764d0 100755
--- a/tcp_tests/utils/env_k8s
+++ b/tcp_tests/utils/env_k8s
@@ -20,15 +20,21 @@
     unset kube_apiserver_port
     unset kubernetes_admin_user
     unset kubernetes_admin_password
+    unset kubernetes_version_major
+    unset kubernetes_version_minor
 else
     KUBE_TARGET='I@haproxy:proxy:enabled:true and I@kubernetes:master and *01*'
     export kube_host=$(${PYTHONPATH}/tcp_tests/utils/get_param.py -C "${KUBE_TARGET}" pillar.get haproxy:proxy:listen:k8s_secure:binds:address)
     export kube_apiserver_port=$(${PYTHONPATH}/tcp_tests/utils/get_param.py -C "${KUBE_TARGET}" pillar.get haproxy:proxy:listen:k8s_secure:binds:port)
     export kubernetes_admin_user=$(${PYTHONPATH}/tcp_tests/utils/get_param.py -C "${KUBE_TARGET}" pillar.get kubernetes:master:admin:username)
     export kubernetes_admin_password=$(${PYTHONPATH}/tcp_tests/utils/get_param.py -C "${KUBE_TARGET}" pillar.get kubernetes:master:admin:password)
+    export kubernetes_version_major=$(${PYTHONPATH}/tcp_tests/utils/get_param.py -C "${KUBE_TARGET}" cmd.run "curl -s 127.0.0.1:8080/version|grep major| cut -d'\"' -f4|sed 's/[^0-9]*//g'")
+    export kubernetes_version_minor=$(${PYTHONPATH}/tcp_tests/utils/get_param.py -C "${KUBE_TARGET}" cmd.run "curl -s 127.0.0.1:8080/version|grep minor| cut -d'\"' -f4|sed 's/[^0-9]*//g'")
 fi
 
 echo "export kube_host='$kube_host'  # Kubernetes API host"
 echo "export kube_apiserver_port='${kube_apiserver_port}'  # Kubernetes API port"
 echo "export kubernetes_admin_user='${kubernetes_admin_user}'  # Kubernetes admin user"
 echo "export kubernetes_admin_password='${kubernetes_admin_password}'  # Kubernetes admin password"
+echo "export kubernetes_version_major='${kubernetes_version_major}'   # Kubernetes version major"
+echo "export kubernetes_version_minor='${kubernetes_version_minor}'   # Kubernetes version minor"
diff --git a/tcp_tests/utils/get_jenkins_job_stages.py b/tcp_tests/utils/get_jenkins_job_stages.py
index 143e1a2..361b8d1 100755
--- a/tcp_tests/utils/get_jenkins_job_stages.py
+++ b/tcp_tests/utils/get_jenkins_job_stages.py
@@ -15,6 +15,7 @@
 import argparse
 import os
 import sys
+import time
 
 sys.path.append(os.getcwd())
 try:
@@ -107,8 +108,17 @@
                                 for line in log["text"].splitlines()))
         return res
 
-    wf = jenkins.get_workflow(opts.job_name, opts.build_number)
-    info = jenkins.build_info(opts.job_name, int(wf['id']))
+    for _ in range(3):
+        wf = jenkins.get_workflow(opts.job_name, opts.build_number)
+        info = jenkins.build_info(opts.job_name, int(wf['id']))
+        if info is not None:
+            break
+        time.sleep(3)
+
+    if not info:
+        raise("Cannot get info for the job {0}:{1}".format(opts.job_name,
+                                                           opts.build_number))
+
     build_description = ("[" + info['fullDisplayName'] + "] " +
                          info['url'] + " : " + info['result'])
     stages = get_stages(wf['stages'], 0)