Merge changes I2f1a8ae6,Ida9e87c6,I5b3a2fa3,I2c0f649e,I28f9dd16, ...

* changes:
  Add MAKE_SNAPSHOT_STAGES job parameters
  Add heat template for oc41 deployment
  Add support for ENV_MANAGER=heat in test pipelines
  Add get_param_heat_template.py
  Look up nodes and networks in the nested stacks
  Improve heat environment manager
  Replace env_* scripts with static scripts
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index f5c5b9a..f9273e1 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -3,30 +3,57 @@
 def common = new com.mirantis.mk.Common()
 def shared = new com.mirantis.system_qa.SharedPipeline()
 def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+def env_manager = env.ENV_MANAGER ?: 'devops'
+
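+// MAKE_SNAPSHOT_STAGES defaults to true unless it is explicitly set to "false".
+// Heat-based environments are not managed by dos.py, so snapshot stages are always disabled for them.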
+if (env_manager == 'devops') {
+    jenkins_slave_node_name = "${NODE_NAME}"
+    make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
+} else if (env_manager == 'heat') {
+    jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
+    make_snapshot_stages = false
+}
 
 currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
 
-def deploy(shared, common, steps) {
+def deploy(shared, common, steps, env_manager) {
     def report_text = ''
     try {
 
         stage("Clean the environment and clone tcp-qa") {
-            shared.prepare_working_dir()
+            shared.prepare_working_dir(env_manager)
         }
 
         stage("Create environment, generate model, bootstrap the salt-cluster") {
             // steps: "hardware,create_model,salt"
-            shared.swarm_bootstrap_salt_cluster_devops()
+            if (env_manager == 'devops') {
+                shared.swarm_bootstrap_salt_cluster_devops()
+            } else if (env_manager == 'heat') {
+                // If shared.swarm_bootstrap_salt_cluster_heat() fails,
+                // do not schedule shared.swarm_testrail_report() on the non-existing Jenkins slave
+                shared.swarm_bootstrap_salt_cluster_heat(jenkins_slave_node_name)
+                // Once the Heat stack is created, 'jenkins_slave_node_name' points to the new Jenkins slave;
+                // dos.py snapshots are disabled for the 'heat' manager
+            } else {
+                throw new Exception("Unknow env_manager: '${env_manager}'")
+            }
         }
 
         stage("Install core infrastructure and deploy CICD nodes") {
-            // steps: env.DRIVETRAIN_STACK_INSTALL
-            shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT)
+            if (env.DRIVETRAIN_STACK_INSTALL) {
+                // steps: env.DRIVETRAIN_STACK_INSTALL
+                shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages)
+            } else {
+                common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
+            }
         }
 
         stage("Deploy platform components") {
-            // steps: env.PLATFORM_STACK_INSTALL
-            shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
+            if (env.PLATFORM_STACK_INSTALL) {
+                // steps: env.PLATFORM_STACK_INSTALL
+                shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages)
+            } else {
+                common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
+            }
         }
 
         currentBuild.result = 'SUCCESS'
@@ -34,42 +61,50 @@
     } catch (e) {
         common.printMsg("Deploy is failed: " + e.message , "purple")
         report_text = e.message
-        def snapshot_name = "deploy_failed"
-        shared.run_cmd("""\
-            dos.py suspend ${ENV_NAME} || true
-            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
-        """)
-        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+        if (make_snapshot_stages) {
+            def snapshot_name = "deploy_failed"
             shared.run_cmd("""\
-                dos.py resume ${ENV_NAME} || true
+                dos.py suspend ${ENV_NAME} || true
+                dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
             """)
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+                shared.run_cmd("""\
+                    dos.py resume ${ENV_NAME} || true
+                """)
+            }
+            shared.devops_snapshot_info(snapshot_name)
         }
-        shared.devops_snapshot_info(snapshot_name)
         throw e
     } finally {
         shared.create_deploy_result_report(steps, currentBuild.result, report_text)
     }
 }
 
-def test(shared, common, steps) {
+def test(shared, common, steps, env_manager) {
     try {
         stage("Run tests") {
-            shared.swarm_run_pytest(steps)
+            if (env.RUN_TEST_OPTS) {
+                shared.swarm_run_pytest(steps, jenkins_slave_node_name, make_snapshot_stages)
+            } else {
+                common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
+            }
         }
 
     } catch (e) {
         common.printMsg("Tests are failed: " + e.message, "purple")
-        def snapshot_name = "tests_failed"
-        shared.run_cmd("""\
-            dos.py suspend ${ENV_NAME} || true
-            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
-        """)
-        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+        if (make_snapshot_stages) {
+            def snapshot_name = "tests_failed"
             shared.run_cmd("""\
-                dos.py resume ${ENV_NAME} || true
+                dos.py suspend ${ENV_NAME} || true
+                dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
             """)
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+                shared.run_cmd("""\
+                    dos.py resume ${ENV_NAME} || true
+                """)
+            }
+            shared.devops_snapshot_info(snapshot_name)
         }
-        shared.devops_snapshot_info(snapshot_name)
         throw e
     }
 }
@@ -80,18 +115,20 @@
   node ("${NODE_NAME}") {
     try {
         // run deploy stages
-        deploy(shared, common, steps)
+        deploy(shared, common, steps, env_manager)
         // run test stages
-        test(shared, common, steps)
+        test(shared, common, steps, env_manager)
     } catch (e) {
         common.printMsg("Job is failed: " + e.message, "purple")
         throw e
     } finally {
-        // shutdown the environment if required
-        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-            shared.run_cmd("""\
-                dos.py destroy ${ENV_NAME} || true
-            """)
+        if (make_snapshot_stages) {
+            // shutdown the environment if required
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                shared.run_cmd("""\
+                    dos.py destroy ${ENV_NAME} || true
+                """)
+            }
         }
 
         stage("Archive all xml reports") {
diff --git a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
index 96ddf76..33f8516 100644
--- a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
@@ -49,7 +49,7 @@
 def test(shared, common, steps) {
     try {
         stage("Run tests") {
-            shared.swarm_run_pytest(steps)
+            shared.swarm_run_pytest(steps, "${NODE_NAME}", true)  // make_snapshot_stages=true for the fuel-devops environment
         }
 
     } catch (e) {
@@ -93,7 +93,7 @@
             archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
         }
         stage("report results to testrail") {
-            shared.swarm_testrail_report(steps)
+            shared.swarm_testrail_report(steps, "${NODE_NAME}")
         }
         stage("Store TestRail reports to job description") {
             def String description = readFile("description.txt")
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index efeabba..392be7c 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -44,6 +44,12 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
         stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
             println "Remove environment ${ENV_NAME}"
             shared.run_cmd("""\
@@ -55,12 +61,6 @@
             """)
         }
 
-        if (env.TCP_QA_REFS) {
-            stage("Update working dir to patch ${TCP_QA_REFS}") {
-                shared.update_working_dir()
-            }
-        }
-
         stage("Create an environment ${ENV_NAME} in disabled state") {
             // deploy_hardware.xml
             shared.run_cmd("""\
@@ -75,18 +75,36 @@
         }
 
         stage("Generate the model") {
-            shared.generate_cookied_model()
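+            // 'dos.py net-list ${ENV_NAME}' is assumed to print one "<pool_name>  <cidr>" pair per line
+            // (illustrative: "admin-pool01  10.70.0.0/24"); the last field is taken as the pool CIDR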
+            def IPV4_NET_ADMIN=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+            def IPV4_NET_CONTROL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+            def IPV4_NET_TENANT=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+            def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+            shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
         }
 
         stage("Generate config drive ISO") {
-            shared.generate_configdrive_iso()
+            def SALT_MASTER_IP=shared.run_cmd_stdout("""\
+                SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+                echo \$SALT_MASTER_INFO|cut -d',' -f2
+                """).trim().split("\n").last()
+            def dhcp_ranges_json=shared.run_cmd_stdout("""\
+                fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
+                fgrep "admin-pool01"|
+                cut -d"=" -f2
+                """).trim().split("\n").last()
+            def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
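+            // Illustrative shape of the parsed value: {"admin-pool01": {"gateway": "10.70.0.1"}};
+            // only the 'gateway' key is used here, any other keys are ignored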
+            def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
+            shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
         }
 
         stage("Upload generated config drive ISO into volume on cfg01 node") {
+            def SALT_MASTER_HOSTNAME=shared.run_cmd_stdout("""\
+                SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+                echo \$SALT_MASTER_INFO|cut -d',' -f1
+                """).trim().split("\n").last()
             shared.run_cmd("""\
                 # Get SALT_MASTER_HOSTNAME to determine the volume name
-                . ./tcp_tests/utils/env_salt
-                virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+                virsh vol-upload ${ENV_NAME}_${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
                 virsh pool-refresh --pool default
             """)
         }
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
new file mode 100644
index 0000000..cfb080f
--- /dev/null
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -0,0 +1,302 @@
+/**
+ *
+ * Create a heat stack environment, generate a model for it
+ * and bootstrap a salt cluster on the environment nodes
+ *
+ * Expected parameters:
+
+ *   PARENT_NODE_NAME              Name of the jenkins slave to create the environment
+ *   PARENT_WORKSPACE              Path to the workspace of the parent job to use tcp-qa repo
+ *   LAB_CONFIG_NAME               Name of the tcp-qa deployment template
+ *   ENV_NAME                      Heat stack name (used as the environment name)
+ *   MCP_VERSION                   MCP version, like 2018.4 or proposed
+ *   MCP_IMAGE_PATH1604            Local path to the image http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ *   IMAGE_PATH_CFG01_DAY01        Local path to the image http://ci.mcp.mirantis.net:8085/images/cfg01-day01-proposed.qcow2
+ *   CFG01_CONFIG_IMAGE_NAME       Name of the config drive image to create, like cfg01.${LAB_CONFIG_NAME}-config-drive.iso
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   PIPELINE_LIBRARY_REF          Reference to the pipeline-library change
+ *   MK_PIPELINES_REF              Reference to the mk-pipelines change
+ *   COOKIECUTTER_TEMPLATE_COMMIT  Commit/tag/branch for cookiecutter-templates repository. If empty, then takes ${MCP_VERSION} value
+ *   SALT_MODELS_SYSTEM_COMMIT     Commit/tag/branch for reclass-system repository. If empty, then takes ${MCP_VERSION} value
+ *   SHUTDOWN_ENV_ON_TEARDOWN      optional, shutdown the heat environment at the end of the job
+ *   MCP_SALT_REPO_URL             Base URL for MCP repositories required to bootstrap cfg01 node. Leave blank to use default
+ *                                 (http://mirror.mirantis.com/ from mcp-common-scripts)
+ *   MCP_SALT_REPO_KEY             URL of the key file. Leave blank to use default
+ *                                 (${MCP_SALT_REPO_URL}/${MCP_VERSION}/salt-formulas/xenial/archive-salt-formulas.key from mcp-common-scripts)
+ *   OS_AUTH_URL                   OpenStack keystone catalog URL
+ *   OS_PROJECT_NAME               OpenStack project (tenant) name
+ *   OS_USER_DOMAIN_NAME           OpenStack user domain name
+ *   OS_CREDENTIALS                OpenStack username and password credentials ID in Jenkins
+ *   LAB_PARAM_DEFAULTS            Filename placed in tcp_tests/templates/_heat_environments, with default parameters for the heat template
+ *
+ *   CREATE_JENKINS_NODE_CREDENTIALS   Jenkins username and password with rights to add/delete Jenkins agents
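+ *   ACCESS_JENKINS_NODE_CREDENTIALS   Jenkins credentials ID for SSH access to the created agent
+ *                                 (referenced in the agent creation payload below)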
+ */
+
+@Library('tcp-qa')_
+
+import groovy.xml.XmlUtil
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
+def cfg01_day01_image_name = "cfg01-day01-${MCP_VERSION}"
+def ubuntu_vcp_image_name = "ubuntu-vcp-${MCP_VERSION}"
+def ubuntu_foundation_image_name = "ubuntu-16.04-foundation-${MCP_VERSION}"
+
+node ("${PARENT_NODE_NAME}") {
+    if (! fileExists("${PARENT_WORKSPACE}")) {
+        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+    }
+    dir("${PARENT_WORKSPACE}") {
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        withCredentials([
+           [$class          : 'UsernamePasswordMultiBinding',
+           credentialsId   : env.OS_CREDENTIALS,
+           passwordVariable: 'OS_PASSWORD',
+           usernameVariable: 'OS_USERNAME']
+        ]) {
+            env.OS_IDENTITY_API_VERSION = 3
+
+            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+
+                // delete heat stack
+                println "Remove heat stack '${ENV_NAME}'"
+                shared.run_cmd("""\
+                    # export OS_IDENTITY_API_VERSION=3
+                    # export OS_AUTH_URL=${OS_AUTH_URL}
+                    # export OS_USERNAME=${OS_USERNAME}
+                    # export OS_PASSWORD=${OS_PASSWORD}
+                    # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+                    openstack --insecure stack delete -y ${ENV_NAME} || true
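+                    # Poll until the stack is gone: 'stack show' keeps succeeding while the
+                    # stack still exists (including DELETE_IN_PROGRESS) and fails once it is deleted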
+                    while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+                """)
+
+                println "Remove config drive ISO"
+                shared.run_cmd("""\
+                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+                """)
+            }
+
+            stage("Generate the model") {
+                def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
+                def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
+                def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
+                def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
+                shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
+            }
+
+            stage("Generate config drive ISO") {
+                def SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
+                def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
+                shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
+            }
+
+            stage("Upload Ubuntu image for foundation node") {
+                shared.run_cmd("""\
+                    if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
+                        wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+                        openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
+                        rm ./${ubuntu_foundation_image_name}
+                    else
+                        echo Image ${ubuntu_foundation_image_name} already exists
+                    fi
+                """)
+            }
+
+            stage("Upload cfg01-day01 and VCP images") {
+                shared.run_cmd("""\
+                    # export OS_IDENTITY_API_VERSION=3
+                    # export OS_AUTH_URL=${OS_AUTH_URL}
+                    # export OS_USERNAME=${OS_USERNAME}
+                    # export OS_PASSWORD=${OS_PASSWORD}
+                    # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+                    openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
+                    openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
+                """)
+            }
+
+            stage("Upload generated config drive ISO into volume on cfg01 node") {
+                shared.run_cmd("""\
+                    # export OS_IDENTITY_API_VERSION=3
+                    # export OS_AUTH_URL=${OS_AUTH_URL}
+                    # export OS_USERNAME=${OS_USERNAME}
+                    # export OS_PASSWORD=${OS_PASSWORD}
+                    # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+                    openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
+                    sleep 3
+                    openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
+                """)
+            }
+
+            stage("Create Heat stack '${ENV_NAME}'") {
+                // Create stack and wait for CREATE_COMPLETED status, manual analog:
+                //    openstack --insecure stack create ${ENV_NAME} \
+                //        --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
+                //        --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
+                //        --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
+                shared.run_cmd("""\
+                    export BOOTSTRAP_TIMEOUT=3600
+                    export ENV_MANAGER=heat
+                    export TEST_GROUP=test_create_environment
+                    export SHUTDOWN_ENV_ON_TEARDOWN=false
+                    export PYTHONIOENCODING=UTF-8
+                    export REPOSITORY_SUITE=${MCP_VERSION}
+                    export ENV_NAME=${ENV_NAME}
+                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                    export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                    py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+                """)
+            }
+
+            stage("Add the Jenkins slave node") {
+                def jenkins_slave_ip_value_name = "foundation_floating"
+                def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
+                def jenkins_slave_executors = 2
+                common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
+                common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
+
+        withCredentials([
+           [$class          : 'UsernamePasswordMultiBinding',
+           credentialsId   : "${CREATE_JENKINS_NODE_CREDENTIALS}",
+           passwordVariable: 'JENKINS_PASS',
+           usernameVariable: 'JENKINS_USER']
+        ]) {
+
+                script_delete_agent = ("""\
+                    CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+                    curl -w '%{http_code}' -o /dev/null \
+                        -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+                        -H \"Content-Type:application/x-www-form-urlencoded\" \
+                        -H \"\$CRUMB\" \
+                        \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
+                        --request \'POST\' --data \'\'
+                    sleep 10
+                """)
+
+                script_create_agent = ("""\
+                    CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+
+                    curl -L -sS -w '%{http_code}' -o /dev/null \
+                        -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+                        -H \"Content-Type:application/x-www-form-urlencoded\" \
+                        -H \"\$CRUMB\" \
+                        -X POST -d 'json={\
+                            \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
+                            \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
+                            \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
+                            \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
+                            \"labelString\": \"'\"\$ENV_NAME\"'\", \
+                            \"mode\": \"EXCLUSIVE\", \
+                            \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
+                            \"launcher\": {\
+                                \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+                                \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+                                \"host\": \"'\"${jenkins_slave_ip}\"'\", \
+                                \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
+                                \"port\": \"'\"22\"'\", \
+                                \"javaPath\": \"\", \
+                                \"jvmOptions\": \"\", \
+                                \"prefixStartSlaveCmd\": \"\", \
+                                \"suffixStartSlaveCmd\": \"\", \
+                                \"launchTimeoutSeconds\": \"\", \
+                                \"maxNumRetries\": \"\", \
+                                \"retryWaitTime\": \"\", \
+                                \"sshHostKeyVerificationStrategy\": {\
+                                    \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
+                                }, \
+                                \"tcpNoDelay\": \"true\"\
+                            }, \
+                            \"retentionStrategy\": {\
+                                \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
+                                \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
+                            }, \
+                            \"nodeProperties\": {\
+                                \"stapler-class-bag\": \"true\"\
+                            }, \
+                            \"type\": \"hudson.slaves.DumbSlave\", \
+                            \"crumb\": \"'\"\$CRUMB\"'\"}' \
+                        \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
+                """)
+                shared.verbose_sh(script_delete_agent, true, false, true)
+                shared.verbose_sh(script_create_agent, true, false, true)
+
+        } // withCredentials
+
+            }// stage
+
+        } // withCredentials
+    } // dir
+} // node
+
+
+node ("${JENKINS_SLAVE_NODE_NAME}") {
+    dir("${PARENT_WORKSPACE}") {
+
+        stage("Clean the environment and clone tcp-qa") {
+            deleteDir()
+            shared.verbose_sh("""\
+                [ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
+            """, true, false, true)
+            shared.run_cmd("""\
+                git clone https://github.com/Mirantis/tcp-qa.git ${PARENT_WORKSPACE}
+            """)
+            shared.update_working_dir()
+        }
+
+        withCredentials([
+           [$class          : 'UsernamePasswordMultiBinding',
+           credentialsId   : env.OS_CREDENTIALS,
+           passwordVariable: 'OS_PASSWORD',
+           usernameVariable: 'OS_USERNAME']
+        ]) {
+
+
+            stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+                def xml_report_name = "deploy_salt.xml"
+                try {
+                    // deploy_salt.xml
+                    shared.run_sh("""\
+                        export ENV_NAME=${ENV_NAME}
+                        export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                        export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                        export ENV_MANAGER=heat
+                        export SHUTDOWN_ENV_ON_TEARDOWN=false
+                        export BOOTSTRAP_TIMEOUT=3600
+                        export PYTHONIOENCODING=UTF-8
+                        export REPOSITORY_SUITE=${MCP_VERSION}
+                        export TEST_GROUP=test_bootstrap_salt
+                        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+                    """)
+                    // Wait for Jenkins to start and for IO to calm down
+                    sleep(60)
+
+                } catch (e) {
+                      common.printMsg("Saltstack cluster deploy is failed", "purple")
+                      if (fileExists(xml_report_name)) {
+                          shared.download_logs("deploy_salt_${ENV_NAME}")
+                          def String junit_report_xml = readFile(xml_report_name)
+                          def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+                          throw new Exception(junit_report_xml_pretty)
+                      } else {
+                          throw e
+                      }
+                } finally {
+                    // TODO(ddmitriev): add checks for salt cluster
+                }
+            } // stage
+        } // withCredentials
+    } // dir
+} // node
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 1939b4d..b5e1ff7 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -11,6 +11,7 @@
  *   STACK_INSTALL_TIMEOUT         Stacks installation timeout
  *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
  *   SHUTDOWN_ENV_ON_TEARDOWN      optional, shutdown fuel-devops environment at the end of the job
+ *   MAKE_SNAPSHOT_STAGES          optional, use "dos.py snapshot" to snapshot stages
  *
  */
 
@@ -18,6 +19,7 @@
 
 common = new com.mirantis.mk.Common()
 shared = new com.mirantis.system_qa.SharedPipeline()
+make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
 
 if (! env.PARENT_NODE_NAME) {
     error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
@@ -55,8 +57,11 @@
                     stage("Sanity check the deployed component [${stack}]") {
                         shared.sanity_check_component(stack)
                     }
-                    stage("Make environment snapshot [${stack}_deployed]") {
-                        shared.devops_snapshot(stack)
+
+                    if (make_snapshot_stages) {
+                        stage("Make environment snapshot [${stack}_deployed]") {
+                            shared.devops_snapshot(stack)
+                        }
                     }
                 } // for
 
@@ -68,10 +73,12 @@
                 // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
                 // and report appropriate data to TestRail
                 // TODO(ddmitriev): add checks for cicd cluster
-                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                    shared.run_cmd("""\
-                        dos.py destroy ${ENV_NAME}
-                    """)
+                if (make_snapshot_stages) {
+                    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                        shared.run_cmd("""\
+                            dos.py destroy ${ENV_NAME}
+                        """)
+                    }
                 }
             }
 
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index cb26aae..061e555 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -11,6 +11,7 @@
  *   STACK_INSTALL_TIMEOUT         Stacks installation timeout
  *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
  *   SHUTDOWN_ENV_ON_TEARDOWN      optional, shutdown fuel-devops environment at the end of the job
+ *   MAKE_SNAPSHOT_STAGES          optional, use "dos.py snapshot" to snapshot stages
  *
  */
 
@@ -18,6 +19,7 @@
 
 common = new com.mirantis.mk.Common()
 shared = new com.mirantis.system_qa.SharedPipeline()
+make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
 
 if (! env.PARENT_NODE_NAME) {
     error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
@@ -55,8 +57,10 @@
                     stage("Sanity check the deployed component [${stack}]") {
                         shared.sanity_check_component(stack)
                     }
-                    stage("Make environment snapshot [${stack}_deployed]") {
-                        shared.devops_snapshot(stack)
+                    if (make_snapshot_stages) {
+                        stage("Make environment snapshot [${stack}_deployed]") {
+                            shared.devops_snapshot(stack)
+                        }
                     }
                 } // for
 
@@ -68,10 +72,12 @@
                 // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
                 // and report appropriate data to TestRail
                 // TODO(ddmitriev): add checks for the installed stacks
-                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                    shared.run_cmd("""\
-                        dos.py destroy ${ENV_NAME}
-                    """)
+                if (make_snapshot_stages) {
+                    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                        shared.run_cmd("""\
+                            dos.py destroy ${ENV_NAME}
+                        """)
+                    }
                 }
             }
 
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 994cc70..1e4c849 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -18,12 +18,14 @@
  *   IMAGE_PATH_CFG01_DAY01        Not used (backward compatibility, for manual deployment steps only)
  *   TEMPEST_IMAGE_VERSION         Tempest image version: pike by default, can be queens.
  *   TEMPEST_TARGET                Node where tempest will be run
+ *   MAKE_SNAPSHOT_STAGES          optional, use "dos.py snapshot" to snapshot stages
  */
 
 @Library('tcp-qa')_
 
 common = new com.mirantis.mk.Common()
 shared = new com.mirantis.system_qa.SharedPipeline()
+make_snapshot_stages = "${env.MAKE_SNAPSHOT_STAGES}" != "false" ? true : false
 
 if (! env.PARENT_NODE_NAME) {
     error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
@@ -77,16 +79,19 @@
 
                 def snapshot_name = "test_completed"
                 shared.download_logs("test_completed_${ENV_NAME}")
-                shared.run_cmd("""\
-                    dos.py suspend ${ENV_NAME}
-                    dos.py snapshot ${ENV_NAME} ${snapshot_name}
-                """)
-                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+
+                if (make_snapshot_stages) {
                     shared.run_cmd("""\
-                        dos.py resume ${ENV_NAME}
+                        dos.py suspend ${ENV_NAME}
+                        dos.py snapshot ${ENV_NAME} ${snapshot_name}
                     """)
+                    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+                        shared.run_cmd("""\
+                            dos.py resume ${ENV_NAME}
+                        """)
+                    }
+                    shared.devops_snapshot_info(snapshot_name)
                 }
-                shared.devops_snapshot_info(snapshot_name)
             }
 
         } catch (e) {
@@ -98,10 +103,12 @@
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
             // and report appropriate data to TestRail
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
+            if (make_snapshot_stages) {
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
+                }
             }
         }
     }
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index c4927ea..8c438fb 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -6,6 +6,16 @@
     return Character.isDigit(version.charAt(0))
 }
 
+def verbose_sh(String script, Boolean returnStatus=false, Boolean returnStdout=false, Boolean verboseStdout=false) {
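+    // Logs the command before running it; with verboseStdout=true, also logs whatever 'sh' returns.
+    // Example: verbose_sh(script, true, false, true) prints the script, runs it, and returns its exit status.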
+    def common = new com.mirantis.mk.Common()
+    common.printMsg("Run shell command:\n" + script, "blue")
+    def result = sh(script: script, returnStatus: returnStatus, returnStdout: returnStdout)
+    if (verboseStdout) {
+        common.printMsg("Output:\n" + result, "cyan")
+    }
+    return result
+}
+
 def run_sh(String cmd) {
     // run shell script without catching any output
     def common = new com.mirantis.mk.Common()
@@ -125,15 +135,41 @@
     }
 }
 
-def prepare_working_dir() {
+def prepare_working_dir(env_manager) {
         println "Clean the working directory ${env.WORKSPACE}"
         deleteDir()
 
-        // do not fail if environment doesn't exists
-        println "Remove environment ${ENV_NAME}"
-        run_cmd("""\
-            dos.py erase ${ENV_NAME} || true
-        """)
+        if (env_manager == 'devops') {
+            // do not fail if environment doesn't exist
+            println "Remove fuel-devops environment '${ENV_NAME}'"
+            run_cmd("""\
+                dos.py erase ${ENV_NAME} || true
+            """)
+        } else if (env_manager == 'heat') {
+            // delete heat stack
+            println "Remove heat stack '${ENV_NAME}'"
+            withCredentials([
+                       [$class          : 'UsernamePasswordMultiBinding',
+                       credentialsId   : env.OS_CREDENTIALS,
+                       passwordVariable: 'OS_PASSWORD',
+                       usernameVariable: 'OS_USERNAME']
+            ]) {
+                run_cmd("""\
+                    export OS_IDENTITY_API_VERSION=3
+                    export OS_AUTH_URL=${OS_AUTH_URL}
+                    export OS_USERNAME=${OS_USERNAME}
+                    export OS_PASSWORD=${OS_PASSWORD}
+                    export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+                    export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME}
+                    openstack --insecure stack delete -y ${ENV_NAME} || true
+                    while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+                """)
+            }
+
+        } else {
+            throw new Exception("Unknown env_manager: '${env_manager}'")
+        }
+
         println "Remove config drive ISO"
         run_cmd("""\
             rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
@@ -207,33 +243,87 @@
         build_pipeline_job('swarm-bootstrap-salt-cluster-devops', parameters)
 }
 
-def swarm_deploy_cicd(String stack_to_install, String install_timeout) {
+def swarm_bootstrap_salt_cluster_heat(String jenkins_slave_node_name) {
+        // Bootstrap the salt cluster on a heat stack and register 'jenkins_slave_node_name' as a Jenkins agent
+        def common = new com.mirantis.mk.Common()
+        def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: "release/${env.MCP_VERSION}"
+        def salt_models_system_commit = env.SALT_MODELS_SYSTEM_COMMIT ?: "release/${env.MCP_VERSION}"
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
+        def pipeline_library_ref = env.PIPELINE_LIBRARY_REF ?: ''
+        def cookiecutter_ref_change = env.COOKIECUTTER_REF_CHANGE ?: ''
+        def environment_template_ref_change = env.ENVIRONMENT_TEMPLATE_REF_CHANGE ?: ''
+        def mcp_salt_repo_url = env.MCP_SALT_REPO_URL ?: ''
+        def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
+        def env_ipmi_user = env.IPMI_USER ?: ''
+        def env_ipmi_pass = env.IPMI_PASS ?: ''
+        def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
+        def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
+        def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
+        def parameters = [
+                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'JENKINS_SLAVE_NODE_NAME', value: jenkins_slave_node_name),
+                string(name: 'PARENT_WORKSPACE', value: pwd()),
+                string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+                string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+                string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
+                string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
+                string(name: 'CFG01_CONFIG_IMAGE_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${pipeline_library_ref}"),
+                string(name: 'MK_PIPELINES_REF', value: "${mk_pipelines_ref}"),
+                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${cookiecutter_template_commit}"),
+                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${salt_models_system_commit}"),
+                string(name: 'COOKIECUTTER_REF_CHANGE', value: "${cookiecutter_ref_change}"),
+                string(name: 'ENVIRONMENT_TEMPLATE_REF_CHANGE', value: "${environment_template_ref_change}"),
+                string(name: 'MCP_SALT_REPO_URL', value: "${mcp_salt_repo_url}"),
+                string(name: 'MCP_SALT_REPO_KEY', value: "${mcp_salt_repo_key}"),
+                string(name: 'IPMI_USER', value: env_ipmi_user),
+                string(name: 'IPMI_PASS', value: env_ipmi_pass),
+                string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
+                string(name: 'LAB_CONTROL_IFACE', value: env_lab_ctl_iface),
+                string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
+                string(name: 'OS_AUTH_URL', value: "${OS_AUTH_URL}"),
+                string(name: 'OS_PROJECT_NAME', value: "${OS_PROJECT_NAME}"),
+                string(name: 'OS_USER_DOMAIN_NAME', value: "${OS_USER_DOMAIN_NAME}"),
+                string(name: 'OS_CREDENTIALS', value: "${OS_CREDENTIALS}"),
+                string(name: 'LAB_PARAM_DEFAULTS', value: "${LAB_PARAM_DEFAULTS}"),
+                booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+            ]
+
+        build_pipeline_job('swarm-bootstrap-salt-cluster-heat', parameters)
+}
+
+def swarm_deploy_cicd(String stack_to_install, String install_timeout, String jenkins_slave_node_name, Boolean make_snapshot_stages) {
         // Run openstack_deploy job on cfg01 Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def parameters = [
-                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'STACK_INSTALL', value: stack_to_install),
                 string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                booleanParam(name: 'MAKE_SNAPSHOT_STAGES', value: make_snapshot_stages),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
         build_pipeline_job('swarm-deploy-cicd', parameters)
 }
 
-def swarm_deploy_platform(String stack_to_install, String install_timeout) {
+def swarm_deploy_platform(String stack_to_install, String install_timeout, String jenkins_slave_node_name, Boolean make_snapshot_stages) {
         // Run openstack_deploy job on CICD Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
         def parameters = [
-                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'STACK_INSTALL', value: stack_to_install),
                 string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                booleanParam(name: 'MAKE_SNAPSHOT_STAGES', value: make_snapshot_stages),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
         build_pipeline_job('swarm-deploy-platform', parameters)
@@ -255,7 +345,7 @@
         build_pipeline_job('swarm-deploy-platform-without-cicd', parameters)
 }
 
-def swarm_run_pytest(String passed_steps) {
+def swarm_run_pytest(String passed_steps, String jenkins_slave_node_name, Boolean make_snapshot_stages) {
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -266,7 +356,7 @@
                 string(name: 'ENV_NAME', value: "${ENV_NAME}"),
                 string(name: 'PASSED_STEPS', value: passed_steps),
                 string(name: 'RUN_TEST_OPTS', value: "${RUN_TEST_OPTS}"),
-                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_NODE_NAME', value: jenkins_slave_node_name),
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
@@ -277,8 +367,7 @@
                 string(name: 'TEMPEST_IMAGE_VERSION', value: "${tempest_image_version}"),
                 string(name: 'TEMPEST_TARGET', value: "${tempest_target}"),
                 string(name: 'TEMPEST_EXTRA_ARGS', value: "${tempest_extra_args}"),
-
-
+                booleanParam(name: 'MAKE_SNAPSHOT_STAGES', value: make_snapshot_stages),
             ]
         common.printMsg("Start building job 'swarm-run-pytest' with parameters:", "purple")
         common.prettyPrint(parameters)
@@ -309,13 +398,8 @@
             parameters: parameters
 }
 
-def generate_cookied_model() {
+def generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL) {
         def common = new com.mirantis.mk.Common()
-        // do not fail if environment doesn't exists
-        def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
-        def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
-        def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
-        def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
         println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
         println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
         println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
@@ -351,22 +435,9 @@
         build_shell_job('swarm-cookied-model-generator', parameters, "deploy_generate_model.xml")
 }
 
-def generate_configdrive_iso() {
+def generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW) {
         def common = new com.mirantis.mk.Common()
-        def SALT_MASTER_IP=run_cmd_stdout("""\
-            export ENV_NAME=${ENV_NAME}
-            . ./tcp_tests/utils/env_salt
-            echo \$SALT_MASTER_IP
-            """).trim().split().last()
         println("SALT_MASTER_IP=" + SALT_MASTER_IP)
-
-        def dhcp_ranges_json=run_cmd_stdout("""\
-            fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
-            fgrep "admin-pool01"|
-            cut -d"=" -f2
-            """).trim().split("\n").last()
-        def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
-        def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
         println("ADMIN_NETWORK_GW=" + ADMIN_NETWORK_GW)
 
         def mk_pipelines_ref = env.MK_PIPELINES_REF ?: ''
diff --git a/tcp_tests/fixtures/config_fixtures.py b/tcp_tests/fixtures/config_fixtures.py
index 973e034..8026e5b 100644
--- a/tcp_tests/fixtures/config_fixtures.py
+++ b/tcp_tests/fixtures/config_fixtures.py
@@ -32,6 +32,8 @@
         for test_config in tests_configs.split(','):
             config_files.append(test_config)
 
+    LOG.info("\n" + "-" * 10 + " Initialize oslo.config variables with "
+             "defaults from environment" + "-" * 10)
     config_opts = settings_oslo.load_config(config_files)
 
     if os.path.isfile(config_opts.underlay.ssh_key_file):
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 1770c62..a8856d6 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -18,9 +18,6 @@
 from tcp_tests.helpers import utils
 from tcp_tests import logger
 from tcp_tests import settings
-from tcp_tests.managers import envmanager_devops
-from tcp_tests.managers import envmanager_empty
-from tcp_tests.managers import envmanager_heat
 from tcp_tests.managers import underlay_ssh_manager
 
 LOG = logger.logger
@@ -60,18 +57,21 @@
         # No environment manager is used.
         # 'config' should contain config.underlay.ssh settings
         # 'config' should contain config.underlay.current_snapshot setting
+        from tcp_tests.managers import envmanager_empty
         env = envmanager_empty.EnvironmentManagerEmpty(config=config)
 
     elif env_manager == 'devops':
         # fuel-devops environment manager is used.
         # config.underlay.ssh settings can be empty or with SSH to an existing env
         # config.underlay.current_snapshot
+        from tcp_tests.managers import envmanager_devops
         env = envmanager_devops.EnvironmentManager(config=config)
 
     elif env_manager == 'heat':
         # heat environment manager is used.
         # config.underlay.ssh settings can be empty or with SSH to an existing env
         # config.underlay.current_snapshot
+        from tcp_tests.managers import envmanager_heat
         env = envmanager_heat.EnvironmentManagerHeat(config=config)
     else:
         raise Exception("Unknown hardware manager: '{}'".format(env_manager))
diff --git a/tcp_tests/managers/envmanager_heat.py b/tcp_tests/managers/envmanager_heat.py
index 2d52e00..7432bd5 100644
--- a/tcp_tests/managers/envmanager_heat.py
+++ b/tcp_tests/managers/envmanager_heat.py
@@ -102,7 +102,7 @@
             username=settings.OS_USERNAME,
             password=settings.OS_PASSWORD,
             project_name=settings.OS_PROJECT_NAME,
-            user_domain_name='Default',
+            user_domain_name=settings.OS_USER_DOMAIN_NAME,
             project_domain_name='Default')
         return keystone_session.Session(auth=keystone_auth, verify=False)
 
@@ -137,13 +137,39 @@
             self.__init_heatclient()
             return self.__heatclient.resources
 
+    def __get_stack_parent(self, stack_id, stacks):
+        """Find the parent ID of the specified stack_id in the 'stacks' list"""
+        for stack in stacks:
+            if stack_id == stack.id:
+                if stack.parent:
+                    return self.__get_stack_parent(stack.parent, stacks)
+                else:
+                    return stack.id
+        raise Exception("stack with ID {} not found!".format(stack_id))
+
+    @property
+    def __nested_resources(self):
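+        # show_nested=True returns child stacks as well; resources are collected from
+        # every stack whose root (via __get_stack_parent) is the current environment's stack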
+        resources = []
+        stacks = [s for s in self.__stacks.list(show_nested=True)]
+        current_stack_id = self._current_stack.id
+        for stack in stacks:
+            parent_stack_id = self.__get_stack_parent(stack.id, stacks)
+            if parent_stack_id == current_stack_id:
+                # Add resources to list
+                LOG.info("Get resources from stack {0}"
+                         .format(stack.stack_name))
+                resources.extend([
+                    res for res in self.__resources.list(stack.id)
+                ])
+        LOG.info("Found {0} resources".format(len(resources)))
+        return resources
+
     def _get_resources_by_type(self, resource_type):
         res = []
-        for item in self.__resources.list(
-                self.__config.hardware.heat_stack_name):
+        for item in self.__nested_resources:
             if item.resource_type == resource_type:
                 resource = self.__resources.get(
-                    self.__config.hardware.heat_stack_name,
+                    item.stack_name,
                     item.resource_name)
                 res.append(resource)
         return res
@@ -199,7 +225,7 @@
                                   .format(addr_type,
                                           heat_node.attributes['name'],
                                           network))
-                if fixed is None or floating is None:
+                if fixed is None and floating is None:
                     LOG.error("Unable to determine the correct IP address "
                               "in node '{0}' for network '{1}'"
                               .format(heat_node.attributes['name'], network))
@@ -210,7 +236,13 @@
                     pool_net = netaddr.IPNetwork(address_pool['cidr'])
                     if fixed in pool_net:
                         for role in address_pool['roles']:
-                            addresses[role] = floating
+                            # Use fixed addresses for SSH access
+                            # instead of floating addresses
+                            addresses[role] = fixed
+            if 'metadata' not in heat_node.attributes or \
+                    'roles' not in heat_node.attributes['metadata']:
+                raise Exception("Node {} doesn't have metadata:roles:[...,...]"
+                                .format(heat_node.attributes['name']))
 
             nodes.append({
                 'name': heat_node.attributes['name'],
@@ -281,8 +313,7 @@
 
     def _get_resources_with_wrong_status(self):
         res = []
-        for item in self.__resources.list(
-                self.__config.hardware.heat_stack_name):
+        for item in self.__nested_resources:
             if item.resource_status in BAD_STACK_STATUSES:
                 res.append({
                     'resource_name': item.resource_name,
@@ -485,6 +516,10 @@
             'template': template,
             'files': dict(list(tpl_files.items()) + list(env_files.items())),
             'environment': env,
+            'parameters': {
+                'mcp_version': settings.MCP_VERSION,
+                'env_name': settings.ENV_NAME,
+            }
         }
 
         if env_files_list:
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index c2bcc05..9b321cb 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 import netaddr
+import pkg_resources
 
 from collections import defaultdict
 
@@ -60,6 +61,10 @@
 
         self.execute_commands(commands=commands,
                               label="Install and configure salt")
+        self.create_env_salt()
+        self.create_env_jenkins_day01()
+        self.create_env_jenkins_cicd()
+        self.create_env_k8s()
 
     def change_creds(self, username, password):
         self.__user = username
@@ -271,7 +276,7 @@
         return result['return']
 
     @utils.retry(3, exception=libpepper.PepperException)
-    def sync_time(self, tgt='*'):
+    def sync_time(self, tgt='* and not cfg01*'):
         LOG.info("NTP time sync on the salt minions '{0}'".format(tgt))
         # Force authentication update on the next API access
         # because previous authentication most probably is not valid
@@ -285,3 +290,142 @@
         for node_name, time in sorted(new_time_res[0]['return'][0].items()):
             LOG.info("{0}: {1}".format(node_name, time))
         self.__api = None
+
+    def create_env_salt(self):
+        """Creates static utils/env_salt file"""
+
+        env_salt_filename = pkg_resources.resource_filename(
+            settings.__name__, 'utils/env_salt')
+        with open(env_salt_filename, 'w') as f:
+            f.write(
+                'export SALT_MASTER_IP={host}\n'
+                'export SALTAPI_URL=http://{host}:{port}/\n'
+                'export SALTAPI_USER="{user}"\n'
+                'export SALTAPI_PASS="{password}"\n'
+                'export SALTAPI_EAUTH="pam"\n'
+                'echo "export SALT_MASTER_IP=${{SALT_MASTER_IP}}"\n'
+                'echo "export SALTAPI_URL=${{SALTAPI_URL}}"\n'
+                'echo "export SALTAPI_USER=${{SALTAPI_USER}}"\n'
+                'echo "export SALTAPI_PASS=${{SALTAPI_PASS}}"\n'
+                'echo "export SALTAPI_EAUTH=${{SALTAPI_EAUTH}}"\n'
+                .format(host=self.host, port=self.port,
+                        user=self.__user, password=self.__password)
+            )
+
+    def create_env_jenkins_day01(self):
+        """Creates static utils/env_jenkins_day01 file"""
+
+        env_jenkins_day01_filename = pkg_resources.resource_filename(
+            settings.__name__, 'utils/env_jenkins_day01')
+
+        tgt = 'I@docker:client:stack:jenkins and cfg01*'
+        jenkins_params = self.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master")
+        jenkins_port = jenkins_params['port']
+        jenkins_user = jenkins_params['username']
+        jenkins_pass = jenkins_params['password']
+
+        with open(env_jenkins_day01_filename, 'w') as f:
+            f.write(
+                'export JENKINS_URL=http://{host}:{port}\n'
+                'export JENKINS_USER={user}\n'
+                'export JENKINS_PASS={password}\n'
+                'export JENKINS_START_TIMEOUT=60\n'
+                'export JENKINS_BUILD_TIMEOUT=1800\n'
+                'echo "export JENKINS_URL=${{JENKINS_URL}}'
+                '  # Jenkins API URL"\n'
+                'echo "export JENKINS_USER=${{JENKINS_USER}}'
+                '  # Jenkins API username"\n'
+                'echo "export JENKINS_PASS=${{JENKINS_PASS}}'
+                '  # Jenkins API password or token"\n'
+                'echo "export JENKINS_START_TIMEOUT=${{JENKINS_START_TIMEOUT}}'
+                '  # Timeout waiting for job in queue to start building"\n'
+                'echo "export JENKINS_BUILD_TIMEOUT=${{JENKINS_BUILD_TIMEOUT}}'
+                '  # Timeout waiting for building job to complete"\n'
+                .format(host=self.host, port=jenkins_port,
+                        user=jenkins_user, password=jenkins_pass)
+            )
+
+    def create_env_jenkins_cicd(self):
+        """Creates static utils/env_jenkins_cicd file"""
+
+        env_jenkins_cicd_filename = pkg_resources.resource_filename(
+            settings.__name__, 'utils/env_jenkins_cicd')
+
+        tgt = 'I@docker:client:stack:jenkins and cid01*'
+        try:
+            jenkins_params = self.get_single_pillar(
+                tgt=tgt, pillar="jenkins:client:master")
+        except LookupError as e:
+            LOG.error("Skipping creation of {0}: cannot get Jenkins CICD "
+                      "parameters from '{1}': {2}"
+                      .format(env_jenkins_cicd_filename, tgt, e))
+            return
+
+        jenkins_host = jenkins_params['host']
+        jenkins_port = jenkins_params['port']
+        jenkins_user = jenkins_params['username']
+        jenkins_pass = jenkins_params['password']
+
+        with open(env_jenkins_cicd_filename, 'w') as f:
+            f.write(
+                'export JENKINS_URL=http://{host}:{port}\n'
+                'export JENKINS_USER={user}\n'
+                'export JENKINS_PASS={password}\n'
+                'export JENKINS_START_TIMEOUT=60\n'
+                'export JENKINS_BUILD_TIMEOUT=1800\n'
+                'echo "export JENKINS_URL=${{JENKINS_URL}}'
+                '  # Jenkins API URL"\n'
+                'echo "export JENKINS_USER=${{JENKINS_USER}}'
+                '  # Jenkins API username"\n'
+                'echo "export JENKINS_PASS=${{JENKINS_PASS}}'
+                '  # Jenkins API password or token"\n'
+                'echo "export JENKINS_START_TIMEOUT=${{JENKINS_START_TIMEOUT}}'
+                '  # Timeout waiting for job in queue to start building"\n'
+                'echo "export JENKINS_BUILD_TIMEOUT=${{JENKINS_BUILD_TIMEOUT}}'
+                '  # Timeout waiting for building job to complete"\n'
+                .format(host=jenkins_host, port=jenkins_port,
+                        user=jenkins_user, password=jenkins_pass)
+            )
+
+    def create_env_k8s(self):
+        """Creates static utils/env_k8s file"""
+
+        env_k8s_filename = pkg_resources.resource_filename(
+            settings.__name__, 'utils/env_k8s')
+
+        tgt = 'I@haproxy:proxy:enabled:true and I@kubernetes:master and *01*'
+        try:
+            haproxy_params = self.get_single_pillar(
+                tgt=tgt, pillar="haproxy:proxy:listen:k8s_secure:binds")[0]
+            k8s_params = self.get_single_pillar(
+                tgt=tgt, pillar="kubernetes:master:admin")
+        except LookupError as e:
+            LOG.error("Skipping creation of {0}: cannot get Kubernetes "
+                      "parameters from '{1}': {2}"
+                      .format(env_k8s_filename, tgt, e))
+            return
+
+        kube_host = haproxy_params['address']
+        kube_apiserver_port = haproxy_params['port']
+        kubernetes_admin_user = k8s_params['username']
+        kubernetes_admin_password = k8s_params['password']
+
+        with open(env_k8s_filename, 'w') as f:
+            f.write(
+                'export kube_host={host}\n'
+                'export kube_apiserver_port={port}\n'
+                'export kubernetes_admin_user={user}\n'
+                'export kubernetes_admin_password={password}\n'
+                'echo "export kube_host=${{kube_host}}'
+                '  # Kube API host"\n'
+                'echo "export kube_apiserver_port=${{kube_apiserver_port}}'
+                '  # Kube API port"\n'
+                'echo "export kubernetes_admin_user=${{kubernetes_admin_user}}'
+                '  # Kube API username"\n'
+                'echo "export kubernetes_admin_password='
+                '${{kubernetes_admin_password}}  # Kube API password"\n'
+                .format(host=kube_host, port=kube_apiserver_port,
+                        user=kubernetes_admin_user,
+                        password=kubernetes_admin_password)
+            )
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index d9eae28..759a449 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -26,4 +26,6 @@
 
 # For Queens: https://github.com/openstack/requirements/blob/stable/queens/global-requirements.txt
 python-heatclient>=1.10.0
-keystoneauth1>=3.3.0
\ No newline at end of file
+python-glanceclient>=2.8.0
+python-openstackclient>=3.12.0
+keystoneauth1>=3.3.0
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 346506f..f98981d 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -101,9 +101,10 @@
 SKIP_SYNC_TIME = get_var_as_bool("SKIP_SYNC_TIME", False)
 
 # OpenStack parameters to work with Heat stacks
-OS_STACK_NAME = os.environ.get("OS_STACK_NAME", ENV_NAME)
 OS_HEAT_VERSION = os.environ.get('OS_HEAT_VERSION', 1)
 OS_AUTH_URL = os.environ.get('OS_AUTH_URL', None)
 OS_USERNAME = os.environ.get('OS_USERNAME', None)
 OS_PASSWORD = os.environ.get('OS_PASSWORD', None)
 OS_PROJECT_NAME = os.environ.get('OS_PROJECT_NAME', None)
+OS_USER_DOMAIN_NAME = os.environ.get('OS_USER_DOMAIN_NAME', 'Default')
+LAB_PARAM_DEFAULTS = os.environ.get('LAB_PARAM_DEFAULTS', '')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index e930729..fa61884 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -22,15 +22,14 @@
 from tcp_tests.helpers import oslo_cfg_types as ct
 from tcp_tests import settings
 
-print("\n" + "-" * 10 + " Initialize oslo.config variables with defaults"
-      " from environment" + "-" * 10)
 
 _default_conf = pkg_resources.resource_filename(
     __name__, 'templates/{0}/underlay.yaml'.format(settings.LAB_CONFIG_NAME))
 _default_heat_conf = pkg_resources.resource_filename(
     __name__, 'templates/{0}/underlay.hot'.format(settings.LAB_CONFIG_NAME))
 _default_heat_env = pkg_resources.resource_filename(
-    __name__, 'templates/_heat_environments/microcloud-8116.env')
+    __name__, 'templates/_heat_environments/{0}'.format(
+        settings.LAB_PARAM_DEFAULTS))
 
 _default_salt_steps = pkg_resources.resource_filename(
     __name__, 'templates/{0}/salt.yaml'.format(settings.LAB_CONFIG_NAME))
diff --git a/tcp_tests/templates/_heat_environments/ReadMe.md b/tcp_tests/templates/_heat_environments/ReadMe.md
new file mode 100644
index 0000000..09f568a
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/ReadMe.md
@@ -0,0 +1,116 @@
+1. Required template parameters
+===============================
+The following parameters have fixed names and are required by the Jenkins pipeline swarm-bootstrap-salt-cluster-heat.groovy.
+They can be defined in the .env or .hot file and are used to generate the model.
+```
+management_subnet_cidr
+management_subnet_gateway_ip
+management_subnet_cfg01_ip
+control_subnet_cidr
+tenant_subnet_cidr
+external_subnet_cidr
+```
+Also, the following parameters might be useful to define:
+```
+management_subnet_pool_start
+management_subnet_pool_end
+```
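+
+For example, an environment file can provide all of these via
+parameter_defaults (the values below match the eu-cloud.env added
+in this change and are illustrative):
+```
+parameter_defaults:
+  management_subnet_cidr: "10.7.0.0/24"
+  management_subnet_gateway_ip: 10.7.0.1
+  management_subnet_cfg01_ip: 10.7.0.15
+  control_subnet_cidr: "10.6.0.0/24"
+  tenant_subnet_cidr: "10.8.0.0/24"
+  external_subnet_cidr: "10.9.0.0/24"
+  management_subnet_pool_start: 10.7.0.20
+  management_subnet_pool_end: 10.7.0.60
+```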
+
+2. Required template objects
+============================
+
+2.1 Node roles
+--------------
+
+Node roles are automatically gathered by envmanager_heat.py
+from OS::Nova::Server resources, where they are defined as a list under the "metadata:roles" key:
+
+```
+     cfg01_node:
+       type: OS::Nova::Server
+       properties:
+         metadata:
+           roles:
+           - salt_master
+```
+
+2.2 L3 network roles
+--------------------
+
+Network roles are automatically gathered by envmanager_heat.py
+from OS::Neutron::Subnet resources, where they are defined as a list of tags:
+
+```
+  control_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      ...
+      tags:
+      - private-pool01
+```
+There are four fixed network roles at the moment:
+```
+admin-pool01    # for management_subnet_cidr
+private-pool01  # for control_subnet_cidr
+tenant-pool01   # for tenant_subnet_cidr
+external-pool01 # for external_subnet_cidr
+```
+
+3. External parameters
+======================
+
+The following parameters are defined automatically by envmanager_heat.py,
+outside of the template defaults, and can be used in the template
+to define or look up specific resources:
+```
+env_name     # set from environment variable ENV_NAME; matches the heat stack name
+mcp_version  # set from environment variable MCP_VERSION
+```
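+
+As a minimal sketch of how a fragment consumes these parameters, declare
+them in its parameters section and use env_name to look up the
+per-environment networks by name, as the bundled fragments do:
+```
+parameters:
+  env_name:
+    type: string
+  mcp_version:
+    type: string
+
+resources:
+  instance_port01:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+```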
+
+4. Pre-defined resources in the OpenStack cloud
+===============================================
+
+4.1 Public network
+------------------
+The public network for floating IP addresses should be pre-defined in the cloud.
+Heat templates must use this network to define floating IPs.
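+
+The network name is typically passed in as the net_public parameter and
+referenced when allocating floating IPs, for example:
+```
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: net_public }
+      port_id: { get_resource: instance_port01 }
+```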
+
+4.2 Images
+----------
+The Jenkins pipeline swarm-bootstrap-salt-cluster-heat.groovy checks for and
+creates the required images. In the template, the following image names should be used:
+
+```
+# Image used to bootstrap salt master node cfg01:
+image: { list_join: ['', [ 'cfg01-day01-', { get_param: mcp_version } ]] }
+
+# Config drive image to boot cfg01, with user-data and reclass model
+image: { list_join: ['', [ 'cfg01.', { get_param: env_name }, '-config-drive.iso' ]] }
+
+# Image used to bootstrap VCP nodes:
+image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+
+# Image used to bootstrap the Foundation node:
+image: { list_join: ['', [ 'ubuntu-16.04-foundation-', { get_param: mcp_version } ]] }
+```
+
+5. The foundation node
+======================
+To get direct access to the environment resources without tunnels and jump hosts,
+the pipeline swarm-bootstrap-salt-cluster-heat.groovy expects a foundation node
+to be defined in each heat template.
+
+This node is used to launch a Jenkins agent and run Jenkins jobs inside the
+heat stack. Depending on the environment, the foundation node can be connected
+to some or all of the internal networks to run the necessary tests.
+
+The template 'outputs' should contain the 'foundation_floating' key, for example:
+```
+outputs:
+  foundation_floating:
+    description: foundation node IP address (floating) from external network
+    value:
+      get_attr:
+      - foundation_node
+      - instance_floating_address
+```
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
new file mode 100644
index 0000000..de3bb06
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -0,0 +1,40 @@
+
+resource_registry:
+  "MCP::MultipleInstance": fragments/MultipleInstance.yaml
+  #"MCP::Flavors": fragments/Flavors.yaml
+  "MCP::MasterNode": fragments/MasterNode.yaml
+  "MCP::Compute": fragments/Compute.yaml
+  "MCP::Networks": fragments/Networks.yaml
+  "MCP::SingleInstance": fragments/Instance.yaml
+  "MCP::FoundationNode": fragments/FoundationNode.yaml
+
+parameter_defaults:
+
+  cfg_flavor: system.virtual.salt_master
+  ctl_flavor: system.golden.openstack.control
+  cid_flavor: system.golden.cicd.control
+  ntw_flavor: system.compact.opencontrail.control
+  nal_flavor: system.compact.opencontrail.analytics
+  dbs_flavor: system.golden.openstack.database
+  msg_flavor: system.golden.openstack.message_queue
+  mon_flavor: system.golden.stacklight.server
+  log_flavor: system.golden.stacklight.log
+  mtr_flavor: system.golden.stacklight.telemetry
+  cmp_flavor: system.virtual.openstack.compute
+  kvm_fake_flavor: system.virtual.fake_kvm
+  foundation_flavor: system.virtual.foundation
+
+  key_pair: system_key_8133
+
+  net_public: public
+
+  nameservers: 172.18.208.44
+  control_subnet_cidr: "10.6.0.0/24"
+  tenant_subnet_cidr: "10.8.0.0/24"
+  external_subnet_cidr: "10.9.0.0/24"
+  management_subnet_cidr: "10.7.0.0/24"
+  management_subnet_cfg01_ip: 10.7.0.15
+  management_subnet_gateway_ip: 10.7.0.1
+  management_subnet_pool_start: 10.7.0.20
+  management_subnet_pool_end: 10.7.0.60
+  salt_master_control_ip: 10.6.0.15
diff --git a/tcp_tests/templates/_heat_environments/fragments/Compute.yaml b/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
new file mode 100644
index 0000000..6b4c0c7
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
@@ -0,0 +1,110 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+  network:
+    type: string
+  instance_flavor:
+    type: string
+  instance_name:
+    type: string
+  instance_config_host:
+    type: string
+  key_pair:
+    type: string
+  instance_domain:
+    type: string
+  net_public:
+    type: string
+  control_net_static_ip:
+    type: string
+  underlay_userdata:
+    type: string
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+
+resources:
+  instance_port01:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+  instance_port02:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+      fixed_ips:
+        - ip_address: { get_param: control_net_static_ip }
+  instance_port03:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+
+  instance_instance:
+    type: OS::Nova::Server
+    properties:
+      image_update_policy: REBUILD
+      flavor: { get_param: instance_flavor }
+      image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+      key_name: { get_param: key_pair }
+      name:
+        list_join:
+        - '.'
+        - [ { get_param: instance_name }, { get_param: instance_domain } ]
+      networks:
+      - port: { get_resource: instance_port01 }
+      - port: { get_resource: instance_port02 }
+      - port: { get_resource: instance_port03 }
+      block_device_mapping_v2:
+      - device_name: /dev/vdb
+        device_type: disk
+        boot_index: -1
+        delete_on_termination: true
+        ephemeral_size: 10
+
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          #template: { get_file: underlay--user-data-cfg01--heat.yaml }
+          #template: { get_file: ../underlay-userdata.yaml }
+          template: { get_param: underlay_userdata }
+          params:
+            hostname: { list_join: ['.', [ { get_param: instance_name }, { get_param: instance_domain } ]] }
+            $node_hostname: { get_param: instance_name }
+            $node_domain: { get_param: instance_domain }
+            $config_host: { get_param: instance_config_host }
+      metadata:
+        roles:
+        - salt_minion
+
+  floating_ip:
+    depends_on: [instance_instance]
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: net_public }
+      port_id: { get_resource: instance_port01 }
+  floating_ip_association:
+    depends_on: [floating_ip]
+    type: OS::Neutron::FloatingIPAssociation
+    properties:
+      floatingip_id:  { get_resource: floating_ip }
+      port_id: { get_resource: instance_port01 }
+
+outputs:
+  instance_address:
+    value:
+      get_attr:
+      - instance_instance
+      - addresses
+      - 'management_net'
+      - 0
+      - addr
+    description: "Instance's private IP address"
+  instance:
+    value: { get_resource: instance_instance }
+    description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/Flavors.yaml b/tcp_tests/templates/_heat_environments/fragments/Flavors.yaml
new file mode 100644
index 0000000..6db41e6
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Flavors.yaml
@@ -0,0 +1,123 @@
+---
+
+heat_template_version: queens
+
+resources:
+  cfg01_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 100
+      extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,4,1,5", "hw:numa_cpus.1": "2,6,3,7", "hw:numa_mem.0": "8192", "hw:numa_mem.1": "8192"}
+      name: cfg01_virtual
+      ram: 16384
+      vcpus: 8
+
+  kvm_fake_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: kvm_fake_virtual
+      ram: 2048
+      vcpus: 1
+
+  ctl_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,4,1,5", "hw:numa_cpus.1": "2,6,3,7", "hw:numa_mem.0": "8192", "hw:numa_mem.1": "8192"}
+      name: ctl_virtual
+      ram: 16384
+      vcpus: 8
+
+  cid_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: cid_virtual
+      ram: 6144
+      vcpus: 2
+
+  ntw_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,1", "hw:numa_cpus.1": "2,3", "hw:numa_mem.0": "8192", "hw:numa_mem.1": "8192"}
+      name: ntw_virtual
+      ram: 16384
+      vcpus: 4
+
+  nal_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: nal_virtual
+      ram: 4096
+      vcpus: 4
+
+  dbs_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 150
+      extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,1", "hw:numa_cpus.1": "2,3", "hw:numa_mem.0": "4096", "hw:numa_mem.1": "4096"}
+      name: dbs_virtual
+      ram: 8192
+      vcpus: 4
+
+  msg_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: msg_virtual
+      ram: 16384
+      vcpus: 4
+
+  mon_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 120
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: mon_virtual
+      ram: 4096
+      vcpus: 2
+
+  log_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 100
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: log_virtual
+      ram: 4096
+      vcpus: 2
+
+  mtr_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 100
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: mtr_virtual
+      ram: 4096
+      vcpus: 2
+
+  cmp_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 150
+      extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,1", "hw:numa_cpus.1": "2,3", "hw:numa_mem.0": "4096", "hw:numa_mem.1": "4096"}
+      name: cmp_virtual
+      ephemeral: 10
+      ram: 8192
+      vcpus: 4
+
+  foundation_virtual:
+    type: OS::Nova::Flavor
+    properties:
+      disk: 100
+      extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+      name: foundation_virtual
+      ram: 4096
+      vcpus: 2
+...
diff --git a/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml b/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
new file mode 100644
index 0000000..91f058a
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
@@ -0,0 +1,117 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+  network:
+    type: string
+  instance_flavor:
+    type: string
+  instance_name:
+    type: string
+  instance_config_host:
+    type: string
+  key_pair:
+    type: string
+  instance_domain:
+    type: string
+  net_public:
+    type: string
+  control_net_static_ip:
+    type: string
+  underlay_userdata:
+    type: string
+  env_name:
+    type: string
+  mcp_version:
+    type: string
+
+resources:
+  instance_port01:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+  instance_port02:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+      fixed_ips:
+        - ip_address: { get_param: control_net_static_ip }
+  instance_port03:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+  instance_port04:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+
+  instance_instance:
+    type: OS::Nova::Server
+    properties:
+      image_update_policy: REBUILD
+      flavor: { get_param: instance_flavor }
+      image: { list_join: ['', [ 'ubuntu-16.04-foundation-', { get_param: mcp_version } ]] }
+      key_name: { get_param: key_pair }
+      name:
+        list_join:
+        - '.'
+        - [ { get_param: instance_name }, { get_param: env_name } ]
+      networks:
+      - port: { get_resource: instance_port01 }
+      - port: { get_resource: instance_port02 }
+      - port: { get_resource: instance_port03 }
+      - port: { get_resource: instance_port04 }
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          #template: { get_file: underlay--user-data-cfg01--heat.yaml }
+          #template: { get_file: ../underlay-userdata.yaml }
+          template: { get_param: underlay_userdata }
+          params:
+            hostname: { list_join: ['.', [ { get_param: instance_name }, { get_param: instance_domain } ]] }
+            $node_hostname: { get_param: instance_name }
+            $node_domain: { get_param: instance_domain }
+            $config_host: { get_param: instance_config_host }
+      metadata:
+        roles:
+        - foundation_jenkins_slave
+
+  floating_ip:
+    depends_on: [instance_instance]
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: net_public }
+      port_id: { get_resource: instance_port01 }
+  floating_ip_association:
+    depends_on: [floating_ip]
+    type: OS::Neutron::FloatingIPAssociation
+    properties:
+      floatingip_id:  { get_resource: floating_ip }
+      port_id: { get_resource: instance_port01 }
+
+outputs:
+
+  instance_floating_address:
+    description: foundation node IP address (floating) from external network
+    value:
+      get_attr:
+      - floating_ip
+      - floating_ip_address
+
+  instance_address:
+    value:
+      get_attr:
+      - instance_instance
+      - addresses
+      - 'management_net'
+      - 0
+      - addr
+    description: "Instance's private IP address"
+  instance:
+    value: { get_resource: instance_instance }
+    description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/Instance.yaml b/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
new file mode 100644
index 0000000..1c9be45
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
@@ -0,0 +1,103 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+  network:
+    type: string
+  instance_flavor:
+    type: string
+  instance_name:
+    type: string
+  instance_config_host:
+    type: string
+  key_pair:
+    type: string
+  instance_domain:
+    type: string
+  net_public:
+    type: string
+  control_net_static_ip:
+    type: string
+  underlay_userdata:
+    type: string
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+
+resources:
+  instance_port01:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+  instance_port02:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+      fixed_ips:
+        - ip_address: { get_param: control_net_static_ip }
+  instance_port03:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+
+  instance_instance:
+    type: OS::Nova::Server
+    properties:
+      image_update_policy: REBUILD
+      flavor: { get_param: instance_flavor }
+      image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+      key_name: { get_param: key_pair }
+      name:
+        list_join:
+        - '.'
+        - [ { get_param: instance_name }, { get_param: instance_domain } ]
+      networks:
+      - port: { get_resource: instance_port01 }
+      - port: { get_resource: instance_port02 }
+      - port: { get_resource: instance_port03 }
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          #template: { get_file: underlay--user-data-cfg01--heat.yaml }
+          template: { get_param: underlay_userdata }
+          #template: { get_file: ../../templates/{ get_param: lab_config_name }/underlay-userdata.yaml }
+          params:
+            hostname: { list_join: ['.', [ { get_param: instance_name }, { get_param: instance_domain } ]] }
+            $node_hostname: { get_param: instance_name }
+            $node_domain: { get_param: instance_domain }
+            $config_host: { get_param: instance_config_host }
+      metadata:
+        roles:
+        - salt_minion
+
+  floating_ip:
+    depends_on: [instance_instance]
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: net_public }
+      port_id: { get_resource: instance_port01 }
+  floating_ip_association:
+    depends_on: [floating_ip]
+    type: OS::Neutron::FloatingIPAssociation
+    properties:
+      floatingip_id:  { get_resource: floating_ip }
+      port_id: { get_resource: instance_port01 }
+
+outputs:
+  instance_address:
+    value:
+      get_attr:
+      - instance_instance
+      - addresses
+      - 'management_net'
+      - 0
+      - addr
+    description: "Instance's private IP address"
+  instance:
+    value: { get_resource: instance_instance }
+    description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml b/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
new file mode 100644
index 0000000..410deb6
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
@@ -0,0 +1,94 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+  management_subnet_cfg01_ip:
+    type: string
+  salt_master_control_ip:
+    type: string
+  network:
+    type: string
+  cfg01_flavor:
+    type: string
+  instance_name:
+    type: string
+  key_pair:
+    type: string
+  instance_domain:
+    type: string
+  net_public:
+    type: string
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+
+resources:
+  instance_port01:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+      fixed_ips:
+        - ip_address: { get_param: management_subnet_cfg01_ip }
+
+  instance_port02:
+    type: OS::Neutron::Port
+    properties:
+      port_security_enabled: false
+      network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+      fixed_ips:
+        - ip_address: { get_param: salt_master_control_ip }
+
+  instance_instance:
+    type: OS::Nova::Server
+    properties:
+      image_update_policy: REBUILD
+      flavor: { get_param: cfg01_flavor }
+      image: { list_join: ['', [ 'cfg01-day01-', { get_param: mcp_version } ]] }
+      key_name: { get_param: key_pair }
+      name:
+        list_join:
+        - '.'
+        - [ { get_param: instance_name }, { get_param: instance_domain } ]
+      networks:
+      - port: { get_resource: instance_port01 }
+      - port: { get_resource: instance_port02 }
+      block_device_mapping_v2:
+      - device_name: /dev/cdrom
+        device_type: cdrom
+        boot_index: -1
+        delete_on_termination: true
+        image: { list_join: ['', [ 'cfg01.', { get_param: env_name }, '-config-drive.iso' ]] }
+        volume_size: 1
+      metadata:
+        roles:
+        - salt_master
+
+  floating_ip:
+    depends_on: [instance_instance]
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: net_public }
+      port_id: { get_resource: instance_port01 }
+  floating_ip_association:
+    depends_on: [floating_ip]
+    type: OS::Neutron::FloatingIPAssociation
+    properties:
+      floatingip_id:  { get_resource: floating_ip }
+      port_id: { get_resource: instance_port01 }
+
+outputs:
+  instance_address:
+    value:
+      get_attr:
+      - instance_instance
+      - addresses
+      - 'management_net'
+      - 0
+      - addr
+    description: "Instance's private IP address"
+  instance:
+    value: { get_resource: instance_instance }
+    description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml b/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
new file mode 100644
index 0000000..986b855
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
@@ -0,0 +1,76 @@
+heat_template_version: queens
+
+description: 3 single nodes fragment
+
+parameters:
+  key_pair:
+    type: string
+  network:
+    type: string
+  instance01_name:
+    type: string
+  instance02_name:
+    type: string
+  instance03_name:
+    type: string
+  instance_domain:
+    type: string
+  instance_flavor:
+    type: string
+  instance_config_host:
+    type: string
+  instance01_control_net_static_ip:
+    type: string
+  instance02_control_net_static_ip:
+    type: string
+  instance03_control_net_static_ip:
+    type: string
+  underlay_userdata:
+    type: string
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+
+resources:
+  instance01:
+    type: MCP::SingleInstance
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      key_pair: { get_param: key_pair }
+      network: { get_param: network }
+      control_net_static_ip: { get_param: instance01_control_net_static_ip }
+      instance_name: { get_param: instance01_name }
+      instance_domain: { get_param: instance_domain }
+      instance_flavor: { get_param: instance_flavor }
+      instance_config_host: { get_param: instance_config_host }
+      underlay_userdata: { get_param: underlay_userdata }
+
+  instance02:
+    type: MCP::SingleInstance
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      key_pair: { get_param: key_pair }
+      network: { get_param: network }
+      control_net_static_ip: { get_param: instance02_control_net_static_ip }
+      instance_name: { get_param: instance02_name }
+      instance_domain: { get_param: instance_domain }
+      instance_flavor: { get_param: instance_flavor }
+      instance_config_host: { get_param: instance_config_host }
+      underlay_userdata: { get_param: underlay_userdata }
+
+  instance03:
+    type: MCP::SingleInstance
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      key_pair: { get_param: key_pair }
+      network: { get_param: network }
+      control_net_static_ip: { get_param: instance03_control_net_static_ip }
+      instance_name: { get_param: instance03_name }
+      instance_domain: { get_param: instance_domain }
+      instance_flavor: { get_param: instance_flavor }
+      instance_config_host: { get_param: instance_config_host }
+      underlay_userdata: { get_param: underlay_userdata }
diff --git a/tcp_tests/templates/_heat_environments/fragments/Networks.yaml b/tcp_tests/templates/_heat_environments/fragments/Networks.yaml
new file mode 100644
index 0000000..076684b
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Networks.yaml
@@ -0,0 +1,173 @@
+---
+heat_template_version: queens
+
+description: Network fragment
+
+parameters:
+  env_name:
+    type: string
+  net_public:
+    type: string
+  stack_name:
+    type: string
+  control_subnet_cidr:
+    type: string
+  tenant_subnet_cidr:
+    type: string
+  management_subnet_cidr:
+    type: string
+  external_subnet_cidr:
+    type: string
+  management_subnet_gateway_ip:
+    type: string
+#  control_net_dhcp:
+#    type: boolean
+#    default: false
+#  tenant_net_dhcp:
+#    type: boolean
+#    default: false
+  management_net_dhcp:
+    type: boolean
+    default: true
+  management_subnet_pool_start:
+    type: string
+  management_subnet_pool_end:
+    type: string
+#  external_net_dhcp:
+#    type: boolean
+#    default: false
+
+  nameservers:
+    type: comma_delimited_list
+
+resources:
+  control_net:
+    type: OS::Neutron::Net
+    properties:
+      port_security_enabled: false
+      name: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+  tenant_net:
+    type: OS::Neutron::Net
+    properties:
+      port_security_enabled: false
+      name: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+  management_net:
+    type: OS::Neutron::Net
+    properties:
+      port_security_enabled: false
+      name: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+  external_net:
+    type: OS::Neutron::Net
+    properties:
+      port_security_enabled: false
+      name: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+
+  control_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { list_join: ['-', [ 'control_subnet', { get_param: env_name } ]] }
+      #name: control_subnet
+      network: { get_resource: control_net }
+      cidr: { get_param: control_subnet_cidr }
+      #enable_dhcp: { get_param: control_net_dhcp }
+      #dns_nameservers: { get_param: nameservers }
+      dns_nameservers: [0.0.0.0]
+      gateway_ip: null
+      tags:
+      - private-pool01
+
+  tenant_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { list_join: ['-', [ 'tenant_subnet', { get_param: env_name } ]] }
+      #name: tenant_subnet
+      network: { get_resource: tenant_net }
+      cidr: { get_param: tenant_subnet_cidr }
+      #enable_dhcp: { get_param: tenant_net_dhcp }
+      #dns_nameservers: { get_param: nameservers }
+      dns_nameservers: [0.0.0.0]
+      gateway_ip: null
+      tags:
+      - tenant-pool01
+
+  management_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      gateway_ip: { get_param: management_subnet_gateway_ip }
+      name: { list_join: ['-', [ 'management_subnet', { get_param: env_name } ]] }
+      #name: management_subnet
+      network: { get_resource: management_net }
+      cidr: { get_param: management_subnet_cidr }
+      enable_dhcp: { get_param: management_net_dhcp }
+      allocation_pools:
+        - start: { get_param: management_subnet_pool_start }
+          end: { get_param: management_subnet_pool_end }
+      dns_nameservers: { get_param: nameservers }
+      tags:
+      - admin-pool01
+
+  external_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { list_join: ['-', [ 'external_subnet', { get_param: env_name } ]] }
+      #name: external_subnet
+      network: { get_resource: external_net }
+      cidr: { get_param: external_subnet_cidr }
+      #enable_dhcp: { get_param: external_net_dhcp }
+      #dns_nameservers: { get_param: nameservers }
+      dns_nameservers: [0.0.0.0]
+      gateway_ip: null
+      tags:
+      - external-pool01
+
+  router:
+    type: OS::Neutron::Router
+    properties:
+      #name: publicbarerouter
+      external_gateway_info:
+        network: { get_param: net_public }
+        #enable_snat: True
+
+  router_subnet:
+    type: OS::Neutron::RouterInterface
+    depends_on: management_subnet
+    properties:
+      router: { get_resource: router }
+      subnet: { get_resource: management_subnet }
+
+outputs:
+  network:
+    value: { get_param: stack_name }
+  management_net_prefix:
+    value:
+      list_join:
+        - '.'
+        - - str_split: ['.', { get_param: management_subnet_cidr }, 0]
+          - str_split: ['.', { get_param: management_subnet_cidr }, 1]
+          - str_split: ['.', { get_param: management_subnet_cidr }, 2]
+
+  control_net_prefix:
+    value:
+      list_join:
+        - '.'
+        - - str_split: ['.', { get_param: control_subnet_cidr }, 0]
+          - str_split: ['.', { get_param: control_subnet_cidr }, 1]
+          - str_split: ['.', { get_param: control_subnet_cidr }, 2]
+
+  tenant_net_prefix:
+    value:
+      list_join:
+        - '.'
+        - - str_split: ['.', { get_param: tenant_subnet_cidr }, 0]
+          - str_split: ['.', { get_param: tenant_subnet_cidr }, 1]
+          - str_split: ['.', { get_param: tenant_subnet_cidr }, 2]
+
+  external_net_prefix:
+    value:
+      list_join:
+        - '.'
+        - - str_split: ['.', { get_param: external_subnet_cidr }, 0]
+          - str_split: ['.', { get_param: external_subnet_cidr }, 1]
+          - str_split: ['.', { get_param: external_subnet_cidr }, 2]
+
+...
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh b/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh
old mode 100755
new mode 100644
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8116.env b/tcp_tests/templates/_heat_environments/microcloud-8116.env
deleted file mode 100644
index 9570a55..0000000
--- a/tcp_tests/templates/_heat_environments/microcloud-8116.env
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-parameter_defaults:
-  #flavor_medium: baremetal
-  flavor_medium: cfg01-virtual
-  flavor_ctl: ctl-virtual
-  image_vcp: ironic_provision_image
-  image_ubuntu_cloud_xenial: cfg01-day01
-  #keypair: system-ci-keypair
-  keypair: baremetal
-
-  net_public: public
-
-  # ironic-specific parameters
-  management_physical_network: ironicnet1
-  management_subnet_cidr: 10.13.0.0/24
-  management_subnet_pool_start: 10.13.0.20
-  management_subnet_pool_end: 10.13.0.60
-  management_subnet_gateway_ip: 10.13.0.1
-  management_subnet_cfg01_ip: 10.13.0.15
-
-  cfg01_configdrive_image: cfg01.cookied-cicd-queens-dvr-sl-config-drive.iso
-  dns_nameservers: 172.18.208.44
-...
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8133.env b/tcp_tests/templates/_heat_environments/microcloud-8133.env
new file mode 100644
index 0000000..6e1cb3b
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/microcloud-8133.env
@@ -0,0 +1,40 @@
+
+resource_registry:
+  "MCP::MultipleInstance": fragments/MultipleInstance.yaml
+  "MCP::Flavors": fragments/Flavors.yaml
+  "MCP::MasterNode": fragments/MasterNode.yaml
+  "MCP::Compute": fragments/Compute.yaml
+  "MCP::Networks": fragments/Networks.yaml
+  "MCP::SingleInstance": fragments/Instance.yaml
+  "MCP::FoundationNode": fragments/FoundationNode.yaml
+
+parameter_defaults:
+
+  cfg_flavor: system.virtual.salt_master
+  ctl_flavor: system.golden.openstack.control
+  cid_flavor: system.golden.cicd.control
+  ntw_flavor: system.compact.opencontrail.control
+  nal_flavor: system.compact.opencontrail.analytics
+  dbs_flavor: system.golden.openstack.database
+  msg_flavor: system.golden.openstack.message_queue
+  mon_flavor: system.golden.stacklight.server
+  log_flavor: system.golden.stacklight.log
+  mtr_flavor: system.golden.stacklight.telemetry
+  cmp_flavor: system.virtual.openstack.compute
+  kvm_fake_flavor: system.virtual.fake_kvm
+  foundation_flavor: system.virtual.foundation
+
+  key_pair: system_key_8133
+
+  net_public: public
+
+  nameservers: 172.18.208.44
+  control_subnet_cidr: "10.6.0.0/24"
+  tenant_subnet_cidr: "10.8.0.0/24"
+  external_subnet_cidr: "10.9.0.0/24"
+  management_subnet_cidr: "10.7.0.0/24"
+  management_subnet_cfg01_ip: 10.7.0.15
+  management_subnet_gateway_ip: 10.7.0.1
+  management_subnet_pool_start: 10.7.0.20
+  management_subnet_pool_end: 10.7.0.60
+  salt_master_control_ip: 10.6.0.15
diff --git a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
new file mode 100644
index 0000000..992dc35
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
@@ -0,0 +1,25 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'heat-cicd-pike-contrail41-sl' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','heat-cicd-pike-contrail41-sl') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '10') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '20') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..7d58774
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,304 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+    +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+    qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+    m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+    7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+    2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+    HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+    AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+    o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+    5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+    XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+    AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+    USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+    uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+    QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+    98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+    r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+    qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+    CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+    p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+    79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+    NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+    CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+    XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+    N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+  bmk_enabled: 'False'
+  cicd_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: ==IPV4_NET_CONTROL_PREFIX==.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+    oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+    IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+    kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+    wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+    27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+    5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+    lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+    k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+    3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+    dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+    0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+    qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+    BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+    UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+    VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+    1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+    nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+    Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+    FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+    HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+    Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+    poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+    17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+    l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+  cluster_domain: heat-cicd-pike-contrail41-sl.local
+  cluster_name: heat-cicd-pike-contrail41-sl
+  opencontrail_version: 4.1
+  linux_repo_contrail_component: oc41
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: ==IPV4_NET_CONTROL_PREFIX==.0/24
+  control_vlan: '10'
+  tenant_vlan: '20'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+  deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
+  deployment_type: physical
+  dns_server01: 172.18.208.44
+  dns_server02: 172.18.176.6
+  email_address: sgudz@mirantis.com
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: ==IPV4_NET_CONTROL_PREFIX==.241
+  infra_kvm01_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: ==IPV4_NET_CONTROL_PREFIX==.242
+  infra_kvm02_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: ==IPV4_NET_CONTROL_PREFIX==.243
+  infra_kvm03_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: ==IPV4_NET_CONTROL_PREFIX==.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kqueen_custom_mail_enabled: 'False'
+  kqueen_enabled: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_enabled: 'False'
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: ==IPV4_NET_CONTROL_PREFIX==.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: ==IPV4_NET_CONTROL_PREFIX==.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: ==IPV4_NET_CONTROL_PREFIX==.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: ==IPV4_NET_CONTROL_PREFIX==.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: ==IPV4_NET_CONTROL_PREFIX==.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: ==IPV4_NET_CONTROL_PREFIX==.220
+  opencontrail_router01_hostname: rtr01
+  opencontrail_router02_address: ==IPV4_NET_CONTROL_PREFIX==.101
+  opencontrail_router02_hostname: rtr02
+  openldap_enabled: 'True'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: ''
+  openstack_benchmark_node01_address: ==IPV4_NET_CONTROL_PREFIX==.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_single_address_ranges: ==IPV4_NET_CONTROL_PREFIX==.101-==IPV4_NET_CONTROL_PREFIX==.102
+  openstack_compute_deploy_address_ranges: ==IPV4_NET_ADMIN_PREFIX==.73-==IPV4_NET_ADMIN_PREFIX==.74
+  openstack_compute_tenant_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.102
+  openstack_compute_backend_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.102
+  openstack_control_address: ==IPV4_NET_CONTROL_PREFIX==.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: ==IPV4_NET_CONTROL_PREFIX==.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: ==IPV4_NET_CONTROL_PREFIX==.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: ==IPV4_NET_CONTROL_PREFIX==.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: ==IPV4_NET_CONTROL_PREFIX==.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_message_queue_address: ==IPV4_NET_CONTROL_PREFIX==.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: ==IPV4_NET_CONTROL_PREFIX==.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: ==IPV4_NET_CONTROL_PREFIX==.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: ==IPV4_NET_CONTROL_PREFIX==.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: opencontrail
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_proxy_address: ==IPV4_NET_CONTROL_PREFIX==.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: ==IPV4_NET_CONTROL_PREFIX==.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: ==IPV4_NET_CONTROL_PREFIX==.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: ==IPV4_NET_CONTROL_PREFIX==.19
+  openstack_version: queens
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+  salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+  salt_master_address: ==IPV4_NET_CONTROL_PREFIX==.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: ==IPV4_NET_ADMIN_PREFIX==.15
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: ==IPV4_NET_CONTROL_PREFIX==.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: ==IPV4_NET_CONTROL_PREFIX==.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: ==IPV4_NET_CONTROL_PREFIX==.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: ==IPV4_NET_CONTROL_PREFIX==.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: ==IPV4_NET_CONTROL_PREFIX==.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: ==IPV4_NET_CONTROL_PREFIX==.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: ==IPV4_NET_CONTROL_PREFIX==.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: ==IPV4_NET_CONTROL_PREFIX==.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: ==IPV4_NET_CONTROL_PREFIX==.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: ==IPV4_NET_CONTROL_PREFIX==.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: ==IPV4_NET_CONTROL_PREFIX==.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: ==IPV4_NET_CONTROL_PREFIX==.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  openldap_domain: heat-cicd-pike-contrail41-sl.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  ceph_enabled: 'False'
+  ceph_version: "luminous"
+  ceph_hyper_converged: "False"
+  ceph_osd_backend: "bluestore"
+  ceph_osd_count: "3"
+  ceph_osd_node_count: 3
+  ceph_osd_block_db_size: 20
+  ceph_osd_journal_size: 20
+  ceph_osd_bond_mode: "active-backup"
+  ceph_osd_data_partition_prefix: ""
+
+  ceph_public_network_allocation: storage
+  ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+  ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+
+# for 2018.11.0+
+  ceph_osd_single_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.200-==IPV4_NET_CONTROL_PREFIX==.202"
+  ceph_osd_deploy_address_ranges: "==IPV4_NET_ADMIN_PREFIX==.70-==IPV4_NET_ADMIN_PREFIX==.72"
+  ceph_osd_storage_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.200-==IPV4_NET_CONTROL_PREFIX==.202"
+  ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.200-==IPV4_NET_TENANT_PREFIX==.202"
+
+  ceph_osd_data_disks: "/dev/sdb"
+  ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+  ceph_osd_mode: "separated"
+  ceph_osd_deploy_nic: "eth0"
+  ceph_osd_primary_first_nic: "eth1"
+  ceph_osd_primary_second_nic: "eth2"
+  #ceph_mon_node01_address: "172.16.47.66"
+  #ceph_mon_node01_deploy_address: "172.16.48.66"
+  ceph_mon_node01_address: "==IPV4_NET_CONTROL_PREFIX==.66"
+  ceph_mon_node01_hostname: "cmn01"
+  #ceph_mon_node02_address: "172.16.47.67"
+  #ceph_mon_node02_deploy_address: "172.16.48.67"
+  ceph_mon_node02_address: "==IPV4_NET_CONTROL_PREFIX==.67"
+  ceph_mon_node02_hostname: "cmn02"
+  #ceph_mon_node03_address: "172.16.47.68"
+  #ceph_mon_node03_deploy_address: "172.16.48.68"
+  ceph_mon_node03_address: "==IPV4_NET_CONTROL_PREFIX==.68"
+  ceph_mon_node03_hostname: "cmn03"
+  #ceph_rgw_address: "172.16.47.75"
+  ceph_rgw_address: "==IPV4_NET_CONTROL_PREFIX==.75"
+  #ceph_rgw_node01_address: "172.16.47.76"
+  #ceph_rgw_node01_deploy_address: "172.16.48.76"
+  ceph_rgw_node01_address: "==IPV4_NET_CONTROL_PREFIX==.76"
+  ceph_rgw_node01_hostname: "rgw01"
+  #ceph_rgw_node02_address: "172.16.47.77"
+  #ceph_rgw_node02_deploy_address: "172.16.48.77"
+  ceph_rgw_node02_address: "==IPV4_NET_CONTROL_PREFIX==.77"
+  ceph_rgw_node02_hostname: "rgw02"
+  #ceph_rgw_node03_address: "172.16.47.78"
+  #ceph_rgw_node03_deploy_address: "172.16.48.78"
+  ceph_rgw_node03_address: "==IPV4_NET_CONTROL_PREFIX==.78"
+  ceph_rgw_node03_hostname: "rgw03"
+  manila_enabled: 'False'
+  barbican_enabled: 'False'
+  barbican_integration_enabled: 'False'
+  # SSL settings
+  nova_vnc_tls_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_internal_protocol: 'http'
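[Editor's note] The ==IPV4_NET_*_PREFIX== tokens above are left literal in the template and substituted with real network prefixes when the environment is created. A minimal sketch of that substitution in Python; the prefix values here are illustrative assumptions, not the ones tcp-qa derives from the deployed networks:

    import re

    # Hypothetical prefixes; tcp-qa takes the real ones from the created networks.
    placeholders = {
        'IPV4_NET_ADMIN_PREFIX': '10.70.0',
        'IPV4_NET_CONTROL_PREFIX': '10.70.1',
        'IPV4_NET_TENANT_PREFIX': '10.70.3',
    }

    def render(text):
        # "==IPV4_NET_CONTROL_PREFIX==.11" -> "10.70.1.11"
        return re.sub(r'==(\w+)==', lambda m: placeholders[m.group(1)], text)

    print(render('openstack_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11'))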
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-environment.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-environment.yaml
new file mode 100644
index 0000000..fe4194e
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-environment.yaml
@@ -0,0 +1,380 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      - features_runtest_cfg
+      interfaces:
+        ens3:
+          role: single_static_mgm
+        ens4:
+          role: single_static_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      classes:
+      - system.linux.system.repo.mcp.apt_mirantis.docker
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs01:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs02:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs03:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg01:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg02:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg03:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx02:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon01:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    nal01:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    nal02:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    nal03:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ntw01:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ntw02:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ntw03:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr01:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr02:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr03:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log01:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log02:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log03:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmp<<count>>:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - features_lvm_backend_volume_vdb
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens5:
+          role: bond0_ab_contrail_single
+        ens4:
+          role: single_ctl
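[Editor's note] 'cmp<<count>>' above is a node-name pattern, not a literal hostname: tcp-qa expands it into one entry per compute node. A hedged sketch of that expansion, assuming zero-padded three-digit suffixes (matching the cmp001/cmp002 names used in underlay.hot below):

    def expand_count(name_pattern, count):
        """Expand 'cmp<<count>>' into ['cmp001', 'cmp002', ...]."""
        return [name_pattern.replace('<<count>>', '{0:03d}'.format(i + 1))
                for i in range(count)]

    print(expand_count('cmp<<count>>', 2))  # ['cmp001', 'cmp002']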
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt.yaml
new file mode 100644
index 0000000..a51a8a9
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt.yaml
@@ -0,0 +1,14 @@
+{% set HOSTNAME_CFG01='cfg01.heat-cicd-pike-contrail41-sl.local' %}
+{% set LAB_CONFIG_NAME='heat-cicd-pike-contrail41-sl' %}
+{% set DOMAIN_NAME='heat-cicd-pike-contrail41-sl.local' %}
+
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
+
+{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
+
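[Editor's note] The {% set %} variables above are consumed by the macros imported from shared-salt.yaml. A minimal illustration of the Jinja mechanics only, using plain jinja2 with an inline template rather than tcp-qa's actual rendering path:

    import jinja2

    tpl = jinja2.Template(
        "{% set LAB_CONFIG_NAME = 'heat-cicd-pike-contrail41-sl' %}"
        "{% set DOMAIN_NAME = LAB_CONFIG_NAME ~ '.local' %}"
        "cfg01.{{ DOMAIN_NAME }}")
    print(tpl.render())  # cfg01.heat-cicd-pike-contrail41-sl.local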
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..cb551ef
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
@@ -0,0 +1,63 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+  - name: root
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+  - name: jenkins
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+    ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+  list: |
+    root:r00tme
+  expire: False
+
+packages:
+  - openjdk-8-jre-headless
+  - libyaml-dev
+  - libffi-dev
+  - libvirt-dev
+  - python-dev
+  - python-pip
+  - python-virtualenv
+  #- python-psycopg2
+  - pkg-config
+  - vlan
+  - bridge-utils
+  - ebtables
+
+bootcmd:
+  # Enable root access
+  - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+  - service sshd restart
+output:
+  all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+  # Create swap
+  - fallocate -l 16G /swapfile
+  - chmod 600 /swapfile
+  - mkswap /swapfile
+  - swapon /swapfile
+  - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+write_files:
+  - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+    content: |
+      GRUB_RECORDFAIL_TIMEOUT=30
+      GRUB_TIMEOUT=3
+      GRUB_TIMEOUT_STYLE=menu
+
+  - path: /etc/network/interfaces
+    content: |
+      auto ens3
+      iface ens3 inet dhcp
+
+  - path: /etc/bash_completion.d/fuel_devops30_activate
+    content: |
+      source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay-userdata.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay-userdata.yaml
new file mode 100644
index 0000000..567a445
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay-userdata.yaml
@@ -0,0 +1,78 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+
+disable_root: false
+chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+   - if lvs vg0; then pvresize /dev/vda3; fi
+   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo resolvconf -u
+   #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /usr/share/growlvm/image-layout.yml
+     content: |
+       root:
+         size: '65%VG'
+       home:
+         size: '1%VG'
+       var_log:
+         size: '10%VG'
+       var_log_audit:
+         size: '5%VG'
+       var_tmp:
+         size: '10%VG'
+       tmp:
+         size: '5%VG'
+     owner: root:root
+
+growpart:
+    mode: auto
+    devices:
+      - '/'
+      - '/dev/vda3'
+    ignore_growroot_disabled: false
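[Editor's note] Both user-data files in this change are plain cloud-config, so a quick pre-boot sanity check is just a YAML parse. A small sketch using PyYAML only (a syntax check, not a full cloud-init schema validation), assuming it is run from the tcp-qa repo root:

    import yaml

    for path in (
            'tcp_tests/templates/heat-cicd-pike-contrail41-sl/'
            'underlay--user-data-foundation.yaml',
            'tcp_tests/templates/heat-cicd-pike-contrail41-sl/'
            'underlay-userdata.yaml'):
        with open(path) as f:
            data = yaml.safe_load(f)
        # Both files are expected to carry runcmd and write_files sections.
        assert 'runcmd' in data and 'write_files' in data
        print('{0}: {1} top-level keys'.format(path, len(data)))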
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
new file mode 100644
index 0000000..6285549
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
@@ -0,0 +1,440 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for heat-cicd-pike-contrail41-sl
+
+parameters:
+  instance_domain:
+    type: string
+    default: heat-cicd-pike-contrail41-sl.local
+  mcp_version:
+    type: string
+  env_name:
+    type: string
+  control_subnet_cidr:
+    type: string
+  management_subnet_cidr:
+    type: string
+  management_subnet_pool_start:
+    type: string
+  management_subnet_pool_end:
+    type: string
+  management_subnet_cfg01_ip:
+    type: string
+  management_subnet_gateway_ip:
+    type: string
+
+  key_pair:
+    type: string
+
+  ctl_flavor:
+    type: string
+  cfg_flavor:
+    type: string
+  cid_flavor:
+    type: string
+  ntw_flavor:
+    type: string
+  nal_flavor:
+    type: string
+  kvm_fake_flavor:
+    type: string
+  dbs_flavor:
+    type: string
+  msg_flavor:
+    type: string
+  mon_flavor:
+    type: string
+  log_flavor:
+    type: string
+  mtr_flavor:
+    type: string
+  cmp_flavor:
+    type: string
+  foundation_flavor:
+    type: string
+
+  net_public:
+    type: string
+
+resources:
+  networks:
+    type: MCP::Networks
+    properties:
+      stack_name: { get_param: "OS::stack_name" }
+      env_name: { get_param: env_name }
+
+  #flavors:
+  #  type: MCP::Flavors
+
+  cfg01_node:
+    type: MCP::MasterNode
+    depends_on: [networks]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      cfg01_flavor: { get_param: cfg_flavor }
+      instance_name: cfg01
+      instance_domain: {get_param: instance_domain}
+      network: { get_attr: [networks, network] }
+
+  control_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [cfg01_node]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: ctl01
+      instance02_name: ctl02
+      instance03_name: ctl03
+      instance_flavor: {get_param: ctl_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '11' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '12' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '13' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  openstack_database_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [control_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: dbs01
+      instance02_name: dbs02
+      instance03_name: dbs03
+      instance_flavor: {get_param: dbs_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '51' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '52' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '53' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  fake_kvm_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [cfg01_node]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: kvm01
+      instance02_name: kvm02
+      instance03_name: kvm03
+      instance_flavor: {get_param: kvm_fake_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '241' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '242' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '243' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  openstack_message_queue_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [openstack_database_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: msg01
+      instance02_name: msg02
+      instance03_name: msg03
+      instance_flavor: {get_param: msg_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '41' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '42' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '43' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  cicd_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [cfg01_node]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: cid01
+      instance02_name: cid02
+      instance03_name: cid03
+      instance_flavor: {get_param: cid_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '91' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '92' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '93' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  contrail_ntw_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [openstack_message_queue_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: ntw01
+      instance02_name: ntw02
+      instance03_name: ntw03
+      instance_flavor: {get_param: ntw_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '21' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '22' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '23' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  contrail_nal_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [contrail_ntw_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: nal01
+      instance02_name: nal02
+      instance03_name: nal03
+      instance_flavor: {get_param: nal_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '31' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '32' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '33' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  stacklight_monitor_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [openstack_message_queue_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: mon01
+      instance02_name: mon02
+      instance03_name: mon03
+      instance_flavor: {get_param: mon_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '71' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '72' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '73' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  stacklight_log_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [stacklight_monitor_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: log01
+      instance02_name: log02
+      instance03_name: log03
+      instance_flavor: {get_param: log_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '61' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '62' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '63' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  stacklight_mtr_cluster:
+    type: MCP::MultipleInstance
+    depends_on: [stacklight_log_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance01_name: mtr01
+      instance02_name: mtr02
+      instance03_name: mtr03
+      instance_flavor: {get_param: mtr_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      instance01_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '86' ]
+      instance02_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '87' ]
+      instance03_control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '88' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  prx01_virtual:
+    type: MCP::SingleInstance
+    depends_on: [control_cluster]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: prx01
+      instance_flavor: {get_param: cid_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '81' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  cmp001_virtual:
+    type: MCP::Compute
+    depends_on: [cfg01_node]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: cmp001
+      instance_flavor: {get_param: cmp_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '101' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  cmp002_virtual:
+    type: MCP::Compute
+    depends_on: [cfg01_node]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: cmp002
+      instance_flavor: {get_param: cmp_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '102' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+  foundation_node:
+    type: MCP::FoundationNode
+    depends_on: [networks]
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: foundation
+      instance_flavor: {get_param: foundation_flavor}
+      network: { get_attr: [networks, network] }
+      underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [networks, control_net_prefix] }, '5' ]
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+outputs:
+
+  control_subnet_cidr:
+    description: Control network CIDR
+    value: { get_param: control_subnet_cidr }
+
+  management_subnet_cidr:
+    description: Admin network CIDR
+    value: { get_param: management_subnet_cidr }
+
+  foundation_floating:
+    description: foundation node IP address (floating) from external network
+    value:
+      get_attr:
+      - foundation_node
+      - instance_floating_address
+...
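[Editor's note] The template uses custom resource types (MCP::Networks, MCP::MasterNode, MCP::MultipleInstance, ...) that are resolved to nested stacks through the environment files under tcp_tests/templates/_heat_environments, so it cannot be validated standalone. Its parameter list can still be inspected with the same python-heatclient helper that get_param_heat_template.py below relies on; a sketch, assuming it is run from the tcp-qa repo root:

    from heatclient.common import template_utils

    # Load the HOT template and print the declared parameters and their types.
    files, template = template_utils.get_template_contents(
        'tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot')
    for name, spec in sorted(template['parameters'].items()):
        print(name, spec.get('type'))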
diff --git a/tcp_tests/utils/env_jenkins_cicd b/tcp_tests/utils/env_jenkins_cicd.example
old mode 100755
new mode 100644
similarity index 100%
rename from tcp_tests/utils/env_jenkins_cicd
rename to tcp_tests/utils/env_jenkins_cicd.example
diff --git a/tcp_tests/utils/env_jenkins_day01 b/tcp_tests/utils/env_jenkins_day01.example
old mode 100755
new mode 100644
similarity index 100%
rename from tcp_tests/utils/env_jenkins_day01
rename to tcp_tests/utils/env_jenkins_day01.example
diff --git a/tcp_tests/utils/env_k8s b/tcp_tests/utils/env_k8s.example
old mode 100755
new mode 100644
similarity index 100%
rename from tcp_tests/utils/env_k8s
rename to tcp_tests/utils/env_k8s.example
diff --git a/tcp_tests/utils/env_salt b/tcp_tests/utils/env_salt.example
old mode 100755
new mode 100644
similarity index 100%
rename from tcp_tests/utils/env_salt
rename to tcp_tests/utils/env_salt.example
diff --git a/tcp_tests/utils/get_param_heat_template.py b/tcp_tests/utils/get_param_heat_template.py
new file mode 100755
index 0000000..9b8b236
--- /dev/null
+++ b/tcp_tests/utils/get_param_heat_template.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+from heatclient.common import template_utils
+
+
+if len(sys.argv) <= 1:
+    print("Usage:\n"
+          "  export LAB_CONFIG_NAME=cookied-cicd-...  "
+          "# see directories in tcp_tests/templates/\n"
+          "  export LAB_PARAM_DEFAULTS=nnnn.env "
+          "# see files in tcp_tests/templates/_heat_environments")
+    sys.exit(1)
+
+sys.path.append(os.getcwd())
+try:
+    from tcp_tests import settings_oslo
+except ImportError:
+    print("ImportError: Run the application from the tcp-qa directory or "
+          "set the PYTHONPATH environment variable to directory which contains"
+          " ./tcp_tests")
+    sys.exit(1)
+
+config = settings_oslo.load_config([])
+
+template_file = config.hardware.heat_conf_path
+env_file = config.hardware.heat_env_path
+
+if not os.path.exists(template_file):
+    raise Exception("Heat template '{0}' not found!\n"
+                    "Please set the correct LAB_CONFIG_NAME with underlay.hot"
+                    .format(template_file))
+
+tpl_files, template = template_utils.get_template_contents(
+    template_file)
+
+if os.path.exists(env_file):
+    env_files_list = []
+    env_files, env = (
+        template_utils.process_multiple_environments_and_files(
+            env_paths=[env_file],
+            env_list_tracker=env_files_list))
+else:
+    env = {}
+
+parameter_name = sys.argv[1]
+parameter_value = env['parameter_defaults'].get(parameter_name)
+if parameter_value is None:
+    # Fall back to the default declared for the parameter in the template
+    parameter_value = template['parameters'].get(parameter_name, {}).get('default')
+    if parameter_value is None:
+        raise Exception("Parameter '{0}' not found in env file '{1}' "
+                        "and template file '{2}'"
+                        .format(parameter_name, env_file, template_file))
+
+print(parameter_value)
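[Editor's note] Usage follows the hints the script prints itself: export LAB_CONFIG_NAME (a directory under tcp_tests/templates/) and LAB_PARAM_DEFAULTS (an .env file under tcp_tests/templates/_heat_environments), then run the script from the tcp-qa repo root with the parameter name as the only argument, for example:

    # management_subnet_cidr is one of the parameters declared in underlay.hot
    # ./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr

It prints the value from the env file's parameter_defaults section, falling back to the parameter's default declared in the template.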