Add support for ENV_MANAGER=heat in test pipelines

- If ENV_MANAGER=heat, environment snapshots are unavailable.
  Carefully select the test cases for such environments.
- Added a new job swarm-bootstrap-salt-cluster-heat.groovy
  to create the environment in OpenStack
- Added new parameters for parent jobs:
  ENV_MANAGER (default=devops)
  OS_AUTH_URL (for ENV_MANAGER=heat) - Keystone URL
  OS_PROJECT_NAME (for ENV_MANAGER=heat) - OS project name
  OS_USER_DOMAIN_NAME (for ENV_MANAGER=heat) - OS user domain name
  OS_CREDENTIALS (for ENV_MANAGER=heat) - Jenkins credentials
    with username and password to access OpenStack
  LAB_PARAM_DEFAULTS (for ENV_MANAGER=heat) - environment file
    for the Heat template with a 'parameter_defaults' dict
    (see the sketch after this list).
- Added the requirements 'python-openstackclient' and 'python-glanceclient'
  to manage images and the Heat stack from Jenkins pipeline scripts.
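
  For reference, a minimal LAB_PARAM_DEFAULTS sketch (the parameter names
  below are those read via get_param_heat_template.py in the pipelines;
  the values are illustrative only, and the actual keys are defined by the
  Heat template of the selected LAB_CONFIG_NAME):

    parameter_defaults:
      management_subnet_cidr: 10.70.0.0/24        # admin network
      management_subnet_cfg01_ip: 10.70.0.15      # salt master (cfg01) IP
      management_subnet_gateway_ip: 10.70.0.1     # admin network gateway
      control_subnet_cidr: 10.71.0.0/24           # control/private network
      tenant_subnet_cidr: 10.72.0.0/24            # tenant network
      external_subnet_cidr: 10.73.0.0/24          # external network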

Related-task: #PROD-27687
Change-Id: I5b3a2fa3aac0bf3d592efa3617e25b8a965f377f
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index f5c5b9a..e0e43ab 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -3,30 +3,52 @@
 def common = new com.mirantis.mk.Common()
 def shared = new com.mirantis.system_qa.SharedPipeline()
 def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
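+// Environment manager: 'devops' (fuel-devops, default) or 'heat' (OpenStack Heat)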
+def env_manager = env.ENV_MANAGER ?: 'devops'
+def jenkins_slave_node_name = "${NODE_NAME}"
 
 currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
 
-def deploy(shared, common, steps) {
+def deploy(shared, common, steps, env_manager, jenkins_slave_node_name) {
     def report_text = ''
     try {
 
         stage("Clean the environment and clone tcp-qa") {
-            shared.prepare_working_dir()
+            shared.prepare_working_dir(env_manager)
         }
 
         stage("Create environment, generate model, bootstrap the salt-cluster") {
             // steps: "hardware,create_model,salt"
-            shared.swarm_bootstrap_salt_cluster_devops()
+            if (env_manager == 'devops') {
+                jenkins_slave_node_name = "${NODE_NAME}"
+                shared.swarm_bootstrap_salt_cluster_devops()
+            } else if (env_manager == 'heat') {
+                def new_jenkins_slave_node_name = "openstack_slave_${JOB_NAME}"
+                // If shared.swarm_bootstrap_salt_cluster_heat() fails, do not schedule
+                // shared.swarm_testrail_report() on the non-existent Jenkins slave
+                shared.swarm_bootstrap_salt_cluster_heat(new_jenkins_slave_node_name)
+                // Once the Heat stack is created, point jenkins_slave_node_name to the new Jenkins slave
+                jenkins_slave_node_name = new_jenkins_slave_node_name
+            } else {
+                throw new Exception("Unknow env_manager: '${env_manager}'")
+            }
         }
 
         stage("Install core infrastructure and deploy CICD nodes") {
-            // steps: env.DRIVETRAIN_STACK_INSTALL
-            shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT)
+            if (env.DRIVETRAIN_STACK_INSTALL) {
+                // steps: env.DRIVETRAIN_STACK_INSTALL
+                shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+            } else {
+                common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
+            }
         }
 
         stage("Deploy platform components") {
-            // steps: env.PLATFORM_STACK_INSTALL
-            shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
+            if (env.PLATFORM_STACK_INSTALL) {
+                // steps: env.PLATFORM_STACK_INSTALL
+                shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name)
+            } else {
+                common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
+            }
         }
 
         currentBuild.result = 'SUCCESS'
@@ -34,42 +56,50 @@
     } catch (e) {
         common.printMsg("Deploy is failed: " + e.message , "purple")
         report_text = e.message
-        def snapshot_name = "deploy_failed"
-        shared.run_cmd("""\
-            dos.py suspend ${ENV_NAME} || true
-            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
-        """)
-        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+        if (env_manager == 'devops') {
+            def snapshot_name = "deploy_failed"
             shared.run_cmd("""\
-                dos.py resume ${ENV_NAME} || true
+                dos.py suspend ${ENV_NAME} || true
+                dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
             """)
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+                shared.run_cmd("""\
+                    dos.py resume ${ENV_NAME} || true
+                """)
+            }
+            shared.devops_snapshot_info(snapshot_name)
         }
-        shared.devops_snapshot_info(snapshot_name)
         throw e
     } finally {
         shared.create_deploy_result_report(steps, currentBuild.result, report_text)
     }
 }
 
-def test(shared, common, steps) {
+def test(shared, common, steps, env_manager, jenkins_slave_node_name) {
     try {
         stage("Run tests") {
-            shared.swarm_run_pytest(steps)
+            if (env.RUN_TEST_OPTS) {
+                shared.swarm_run_pytest(steps, jenkins_slave_node_name)
+            } else {
+                common.printMsg("RUN_TEST_OPTS is empty, skipping 'swarm-run-pytest' job", "green")
+            }
         }
 
     } catch (e) {
         common.printMsg("Tests are failed: " + e.message, "purple")
-        def snapshot_name = "tests_failed"
-        shared.run_cmd("""\
-            dos.py suspend ${ENV_NAME} || true
-            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
-        """)
-        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+        if (env_manager == 'devops') {
+            def snapshot_name = "tests_failed"
             shared.run_cmd("""\
-                dos.py resume ${ENV_NAME} || true
+                dos.py suspend ${ENV_NAME} || true
+                dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
             """)
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+                shared.run_cmd("""\
+                    dos.py resume ${ENV_NAME} || true
+                """)
+            }
+            shared.devops_snapshot_info(snapshot_name)
         }
-        shared.devops_snapshot_info(snapshot_name)
         throw e
     }
 }
@@ -80,18 +110,20 @@
   node ("${NODE_NAME}") {
     try {
         // run deploy stages
-        deploy(shared, common, steps)
+        deploy(shared, common, steps, env_manager, jenkins_slave_node_name)
         // run test stages
-        test(shared, common, steps)
+        test(shared, common, steps, env_manager, jenkins_slave_node_name)
     } catch (e) {
         common.printMsg("Job is failed: " + e.message, "purple")
         throw e
     } finally {
-        // shutdown the environment if required
-        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-            shared.run_cmd("""\
-                dos.py destroy ${ENV_NAME} || true
-            """)
+        if (env_manager == 'devops') {
+            // shutdown the environment if required
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                shared.run_cmd("""\
+                    dos.py destroy ${ENV_NAME} || true
+                """)
+            }
         }
 
         stage("Archive all xml reports") {
@@ -99,7 +131,7 @@
         }
         if ("${env.REPORT_TO_TESTRAIL}" != "false") {
             stage("report results to testrail") {
-                shared.swarm_testrail_report(steps)
+                shared.swarm_testrail_report(steps, jenkins_slave_node_name)
             }
             stage("Store TestRail reports to job description") {
                 def String description = readFile("description.txt")
diff --git a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
index 96ddf76..33f8516 100644
--- a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
@@ -49,7 +49,7 @@
 def test(shared, common, steps) {
     try {
         stage("Run tests") {
-            shared.swarm_run_pytest(steps)
+            shared.swarm_run_pytest(steps, "${NODE_NAME}")
         }
 
     } catch (e) {
@@ -93,7 +93,7 @@
             archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
         }
         stage("report results to testrail") {
-            shared.swarm_testrail_report(steps)
+            shared.swarm_testrail_report(steps, "${NODE_NAME}")
         }
         stage("Store TestRail reports to job description") {
             def String description = readFile("description.txt")
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index efeabba..392be7c 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -44,6 +44,12 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
         stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
             println "Remove environment ${ENV_NAME}"
             shared.run_cmd("""\
@@ -55,12 +61,6 @@
             """)
         }
 
-        if (env.TCP_QA_REFS) {
-            stage("Update working dir to patch ${TCP_QA_REFS}") {
-                shared.update_working_dir()
-            }
-        }
-
         stage("Create an environment ${ENV_NAME} in disabled state") {
             // deploy_hardware.xml
             shared.run_cmd("""\
@@ -75,18 +75,36 @@
         }
 
         stage("Generate the model") {
-            shared.generate_cookied_model()
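+            // Look up the address pools that fuel-devops allocated for this environment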
+            def IPV4_NET_ADMIN=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+            def IPV4_NET_CONTROL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+            def IPV4_NET_TENANT=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+            def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+            shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
         }
 
         stage("Generate config drive ISO") {
-            shared.generate_configdrive_iso()
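+            // 'dos.py slave-ip-list' prints '<hostname>,<ip>' per node; take the cfg01 entry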
+            def SALT_MASTER_IP=shared.run_cmd_stdout("""\
+                SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+                echo \$SALT_MASTER_INFO|cut -d',' -f2
+                """).trim().split("\n").last()
+            def dhcp_ranges_json=shared.run_cmd_stdout("""\
+                fgrep dhcp_ranges ${ENV_NAME}_hardware.ini |
+                fgrep "admin-pool01"|
+                cut -d"=" -f2
+                """).trim().split("\n").last()
+            def dhcp_ranges = new groovy.json.JsonSlurperClassic().parseText(dhcp_ranges_json)
+            def ADMIN_NETWORK_GW = dhcp_ranges['admin-pool01']['gateway']
+            shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
         }
 
         stage("Upload generated config drive ISO into volume on cfg01 node") {
+            def SALT_MASTER_HOSTNAME=shared.run_cmd_stdout("""\
+                SALT_MASTER_INFO=\$(for node in \$(dos.py slave-ip-list --address-pool-name admin-pool01 ${ENV_NAME}); do echo \$node; done|grep cfg01)
+                echo \$SALT_MASTER_INFO|cut -d',' -f1
+                """).trim().split("\n").last()
             shared.run_cmd("""\
                 # Get SALT_MASTER_HOSTNAME to determine the volume name
-                . ./tcp_tests/utils/env_salt
-                virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+                virsh vol-upload ${ENV_NAME}_${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
                 virsh pool-refresh --pool default
             """)
         }
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
new file mode 100644
index 0000000..fc12976
--- /dev/null
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -0,0 +1,298 @@
+/**
+ *
+ * Create a Heat stack in OpenStack, generate a model for it
+ * and bootstrap a salt cluster on the environment nodes
+ *
+ * Expected parameters:
+
+ *   PARENT_NODE_NAME              Name of the Jenkins slave to create the environment
+ *   PARENT_WORKSPACE              Path to the workspace of the parent job to use tcp-qa repo
+ *   LAB_CONFIG_NAME               Name of the tcp-qa deployment template
+ *   ENV_NAME                      Fuel-devops environment name
+ *   MCP_VERSION                   MCP version, like 2018.4 or proposed
+ *   MCP_IMAGE_PATH1604            Local path to the image http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+ *   IMAGE_PATH_CFG01_DAY01        Local path to the image http://ci.mcp.mirantis.net:8085/images/cfg01-day01-proposed.qcow2
+ *   CFG01_CONFIG_IMAGE_NAME       Name of the config drive image to create, like cfg01.${LAB_CONFIG_NAME}-config-drive.iso
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   PIPELINE_LIBRARY_REF          Reference to the pipeline-library change
+ *   MK_PIPELINES_REF              Reference to the mk-pipelines change
+ *   COOKIECUTTER_TEMPLATE_COMMIT  Commit/tag/branch for cookiecutter-templates repository. If empty, then takes ${MCP_VERSION} value
+ *   SALT_MODELS_SYSTEM_COMMIT     Commit/tag/branch for reclass-system repository. If empty, then takes ${MCP_VERSION} value
+ *   SHUTDOWN_ENV_ON_TEARDOWN      optional, delete the Heat stack at the end of the job
+ *   MCP_SALT_REPO_URL             Base URL for MCP repositories required to bootstrap cfg01 node. Leave blank to use default
+ *                                 (http://mirror.mirantis.com/ from mcp-common-scripts)
+ *   MCP_SALT_REPO_KEY             URL of the key file. Leave blank to use default
+ *                                 (${MCP_SALT_REPO_URL}/${MCP_VERSION}/salt-formulas/xenial/archive-salt-formulas.key from mcp-common-scripts)
+ *   OS_AUTH_URL                   OpenStack keystone catalog URL
+ *   OS_PROJECT_NAME               OpenStack project (tenant) name
+ *   OS_USER_DOMAIN_NAME           OpenStack user domain name
+ *   OS_CREDENTIALS                OpenStack username and password credentials ID in Jenkins
+ *   LAB_PARAM_DEFAULTS            Filename in tcp_tests/templates/_heat_environments with the 'parameter_defaults' for the Heat template
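+ *   JENKINS_SLAVE_NODE_NAME       Name of the Jenkins agent to create for the foundation node
+ *   CREATE_JENKINS_NODE_CREDENTIALS  Jenkins credentials ID used to access the Jenkins API for creating/deleting agents
+ *   ACCESS_JENKINS_NODE_CREDENTIALS  Jenkins credentials ID used by the SSH launcher to log in to the agent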
+ *
+ */
+
+@Library('tcp-qa')_
+
+import groovy.xml.XmlUtil
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
+def cfg01_day01_image_name = "cfg01-day01-${MCP_VERSION}"
+def ubuntu_vcp_image_name = "ubuntu-vcp-${MCP_VERSION}"
+def ubuntu_foundation_image_name = "ubuntu-16.04-foundation-${MCP_VERSION}"
+
+node ("${PARENT_NODE_NAME}") {
+    if (! fileExists("${PARENT_WORKSPACE}")) {
+        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+    }
+    dir("${PARENT_WORKSPACE}") {
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        withCredentials([
+           [$class          : 'UsernamePasswordMultiBinding',
+           credentialsId   : env.OS_CREDENTIALS,
+           passwordVariable: 'OS_PASSWORD',
+           usernameVariable: 'OS_USERNAME']
+        ]) {
+            env.OS_IDENTITY_API_VERSION = 3
+
+            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+
+                // delete heat stack
+                println "Remove heat stack '${ENV_NAME}'"
+                shared.run_cmd("""\
+                    # OS_AUTH_URL, OS_PROJECT_NAME, OS_USERNAME, OS_PASSWORD and
+                    # OS_IDENTITY_API_VERSION are inherited from the Jenkins environment
+                    openstack --insecure stack delete -y ${ENV_NAME} || true
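+                    # 'stack show' exits non-zero once the stack is gone, ending the wait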
+                    while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+                """)
+
+                println "Remove config drive ISO"
+                shared.run_cmd("""\
+                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+                """)
+            }
+
+            stage("Generate the model") {
+                def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
+                def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
+                def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
+                def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
+                shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
+            }
+
+            stage("Generate config drive ISO") {
+                def SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
+                def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
+                shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
+            }
+
+            stage("Upload Ubuntu image for foundation node") {
+                shared.run_cmd("""\
+                    if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
+                        wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+                        openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
+                        rm ./${ubuntu_foundation_image_name}
+                    else
+                        echo Image ${ubuntu_foundation_image_name} already exists
+                    fi
+                """)
+            }
+
+            stage("Upload cfg01-day01 and VCP images") {
+                shared.run_cmd("""\
+                    # OS_* auth variables are inherited from the Jenkins environment (see withCredentials above)
+
+                    openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
+                    openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
+                """)
+            }
+
+            stage("Upload generated config drive ISO into volume on cfg01 node") {
+                shared.run_cmd("""\
+                    # OS_* auth variables are inherited from the Jenkins environment (see withCredentials above)
+
+                    openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
+                    sleep 3
+                    openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
+                """)
+            }
+
+            stage("Create Heat stack '${ENV_NAME}'") {
+                // Create the stack and wait for CREATE_COMPLETE status; the manual equivalent:
+                //    openstack --insecure stack create ${ENV_NAME} \
+                //        --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
+                //        --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
+                //        --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
+                shared.run_cmd("""\
+                    export BOOTSTRAP_TIMEOUT=3600
+                    export ENV_MANAGER=heat
+                    export TEST_GROUP=test_create_environment
+                    export SHUTDOWN_ENV_ON_TEARDOWN=false
+                    export PYTHONIOENCODING=UTF-8
+                    export REPOSITORY_SUITE=${MCP_VERSION}
+                    export ENV_NAME=${ENV_NAME}
+                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                    export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                    py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+                """)
+            }
+
+            stage("Add the Jenkins slave node") {
+                def jenkins_slave_ip_value_name = "foundation_floating"
+                def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
+                def jenkins_slave_executors = 2
+                common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
+                common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
+
+                withCredentials([
+                   [$class          : 'UsernamePasswordMultiBinding',
+                   credentialsId   : "${CREATE_JENKINS_NODE_CREDENTIALS}",
+                   passwordVariable: 'JENKINS_PASS',
+                   usernameVariable: 'JENKINS_USER']
+                ]) {
+
+                script_delete_agent = ("""\
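+                    # Fetch a CSRF token (crumb); Jenkins requires it for POST requests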
+                    CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+                    curl -w '%{http_code}' -o /dev/null \
+                        -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+                        -H \"Content-Type:application/x-www-form-urlencoded\" \
+                        -H \"\$CRUMB\" \
+                        \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
+                        --request \'POST\' --data \'\'
+                    sleep 10
+                """)
+
+                script_create_agent = ("""\
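+                    # Fetch a crumb, then register the foundation node as a permanent SSH agent (DumbSlave) via the Jenkins REST API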
+                    CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+
+                    curl -L -sS -w '%{http_code}' -o /dev/null \
+                        -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+                        -H \"Content-Type:application/x-www-form-urlencoded\" \
+                        -H \"\$CRUMB\" \
+                        -X POST -d 'json={\
+                            \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
+                            \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
+                            \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
+                            \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
+                            \"labelString\": \"'\"\$ENV_NAME\"'\", \
+                            \"mode\": \"EXCLUSIVE\", \
+                            \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
+                            \"launcher\": {\
+                                \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+                                \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+                                \"host\": \"'\"${jenkins_slave_ip}\"'\", \
+                                \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
+                                \"port\": \"'\"22\"'\", \
+                                \"javaPath\": \"\", \
+                                \"jvmOptions\": \"\", \
+                                \"prefixStartSlaveCmd\": \"\", \
+                                \"suffixStartSlaveCmd\": \"\", \
+                                \"launchTimeoutSeconds\": \"\", \
+                                \"maxNumRetries\": \"\", \
+                                \"retryWaitTime\": \"\", \
+                                \"sshHostKeyVerificationStrategy\": {\
+                                    \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
+                                }, \
+                                \"tcpNoDelay\": \"true\"\
+                            }, \
+                            \"retentionStrategy\": {\
+                                \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
+                                \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
+                            }, \
+                            \"nodeProperties\": {\
+                                \"stapler-class-bag\": \"true\"\
+                            }, \
+                            \"type\": \"hudson.slaves.DumbSlave\", \
+                            \"crumb\": \"'\"\$CRUMB\"'\"}' \
+                        \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
+                """)
+                shared.verbose_sh(script_delete_agent, true, false, true)
+                shared.verbose_sh(script_create_agent, true, false, true)
+
+                } // withCredentials
+
+            } // stage
+
+        } // withCredentials
+    } // dir
+} // node
+
+
+node ("${JENKINS_SLAVE_NODE_NAME}") {
+    dir("${PARENT_WORKSPACE}") {
+
+        stage("Clean the environment and clone tcp-qa") {
+            deleteDir()
+            shared.run_cmd("""\
+                git clone https://github.com/Mirantis/tcp-qa.git ${PARENT_WORKSPACE}
+            """)
+            shared.update_working_dir()
+        }
+
+        withCredentials([
+           [$class          : 'UsernamePasswordMultiBinding',
+           credentialsId   : env.OS_CREDENTIALS,
+           passwordVariable: 'OS_PASSWORD',
+           usernameVariable: 'OS_USERNAME']
+        ]) {
+
+            stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+                def xml_report_name = "deploy_salt.xml"
+                try {
+                    // deploy_salt.xml
+                    shared.run_sh("""\
+                        export ENV_NAME=${ENV_NAME}
+                        export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                        export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                        export ENV_MANAGER=heat
+                        export SHUTDOWN_ENV_ON_TEARDOWN=false
+                        export BOOTSTRAP_TIMEOUT=3600
+                        export PYTHONIOENCODING=UTF-8
+                        export REPOSITORY_SUITE=${MCP_VERSION}
+                        export TEST_GROUP=test_bootstrap_salt
+                        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+                    """)
+                    // Wait for Jenkins to start and I/O to calm down
+                    sleep(60)
+
+                } catch (e) {
+                      common.printMsg("Saltstack cluster deploy is failed", "purple")
+                      if (fileExists(xml_report_name)) {
+                          shared.download_logs("deploy_salt_${ENV_NAME}")
+                          def String junit_report_xml = readFile(xml_report_name)
+                          def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+                          throw new Exception(junit_report_xml_pretty)
+                      } else {
+                          throw e
+                      }
+                } finally {
+                    // TODO(ddmitriev): add checks for salt cluster
+                }
+            } // stage
+        } // withCredentials
+    } // dir
+} // node