Merge "Enable verbose logs for Updating Drivetrain Process"
diff --git a/jobs/global.yaml b/jobs/global.yaml
index 800afa0..7434358 100644
--- a/jobs/global.yaml
+++ b/jobs/global.yaml
@@ -4,3 +4,5 @@
       Do not edit this job through the web ! <br>
       Please use jenkins-job-builder in git <br>
       git clone ssh://gerrit.mcp.mirantis.com:29418/mcp/tcp-qa
+    current-version: 2019.2.11
+    previous-version: 2019.2.10
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 56a2d9b..7262caa 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -6,6 +6,7 @@
 def env_manager = env.ENV_MANAGER ?: 'devops'
 def batch_size = env.BATCH_SIZE ?: ''
 def dist_upgrade_nodes = "${env.DIST_UPGRADE_NODES}" != "false" ? true : false
+def upgrade_saltstack = "${env.UPGRADE_SALTSTACK}" != "false" ? true : false
 
 if (env_manager == 'devops') {
     jenkins_slave_node_name = "${NODE_NAME}"
@@ -19,7 +20,7 @@
 
 currentBuild.description = "${NODE_NAME}:${ENV_NAME}<br>"
 
-def deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes) {
+def deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes, upgrade_saltstack) {
     def report_text = ''
     try {
 
@@ -50,7 +51,7 @@
         stage("Install core infrastructure and deploy CICD nodes") {
         if (env.DRIVETRAIN_STACK_INSTALL) {
                 // steps: env.DRIVETRAIN_STACK_INSTALL
-                shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages, batch_size, dist_upgrade_nodes)
+                shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL, env.DRIVETRAIN_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages, batch_size, dist_upgrade_nodes, upgrade_saltstack)
             } else {
                 common.printMsg("DRIVETRAIN_STACK_INSTALL is empty, skipping 'swarm-deploy-cicd' job", "green")
             }
@@ -59,7 +60,7 @@
         stage("Deploy platform components") {
             if (env.PLATFORM_STACK_INSTALL) {
                 // steps: env.PLATFORM_STACK_INSTALL
-                shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages, batch_size, dist_upgrade_nodes)
+                shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT, jenkins_slave_node_name, make_snapshot_stages, batch_size, dist_upgrade_nodes, upgrade_saltstack)
             } else {
                 common.printMsg("PLATFORM_STACK_INSTALL is empty, skipping 'swarm-deploy-platform' job", "green")
             }
@@ -119,7 +120,7 @@
 }
 
 
-timeout(time: 15, unit: 'HOURS') {
+timeout(time: 23, unit: 'HOURS') {
 // main
 // Temporary disable throttle to check how it will run
 //throttle(['fuel_devops_environment']) {
@@ -128,7 +129,7 @@
     try {
 
         // run deploy stages
-        deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes)
+        deploy(shared, common, steps, env_manager, batch_size, dist_upgrade_nodes, upgrade_saltstack)
         // run test stages
         test(shared, common, steps, env_manager)
     } catch (e) {
diff --git a/jobs/pipelines/rotation-released-deployment.groovy b/jobs/pipelines/rotation-released-deployment.groovy
index 7ae04bc..b341427 100644
--- a/jobs/pipelines/rotation-released-deployment.groovy
+++ b/jobs/pipelines/rotation-released-deployment.groovy
@@ -37,7 +37,7 @@
 }
 
 // ============================================================================
-timeout(time: 15, unit: 'HOURS') {
+timeout(time: 23, unit: 'HOURS') {
   node (env.PARENT_NODE_NAME) {
     stage("Remove extra stacks") {
         withCredentials([
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index 2062c70..10ea257 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -51,276 +51,278 @@
 def ubuntu_foundation_image_name = "ubuntu-16.04-foundation-2019.2.0"
 
 timeout(time: 2, unit: 'HOURS') {
-node ("${PARENT_NODE_NAME}") {
-    if (! fileExists("${PARENT_WORKSPACE}")) {
-        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
-    }
-    dir("${PARENT_WORKSPACE}") {
-
-        if (env.TCP_QA_REFS) {
-            stage("Update working dir to patch ${TCP_QA_REFS}") {
-                shared.update_working_dir()
+    timestamps {
+        node ("${PARENT_NODE_NAME}") {
+            if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
             }
-        }
+            dir("${PARENT_WORKSPACE}") {
 
-        withCredentials([
-           [$class          : 'UsernamePasswordMultiBinding',
-           credentialsId   : env.OS_CREDENTIALS,
-           passwordVariable: 'OS_PASSWORD',
-           usernameVariable: 'OS_USERNAME']
-        ]) {
-            env.OS_IDENTITY_API_VERSION = 3
-
-            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
-
-                // delete heat stack
-                println "Remove heat stack '${ENV_NAME}'"
-                shared.run_cmd("""\
-                    # export OS_IDENTITY_API_VERSION=3
-                    # export OS_AUTH_URL=${OS_AUTH_URL}
-                    # export OS_USERNAME=${OS_USERNAME}
-                    # export OS_PASSWORD=${OS_PASSWORD}
-                    # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
-                    openstack --insecure stack delete -y ${ENV_NAME} || true
-                    timeout 20m /bin/bash -c "while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done"
-                """)
-
-                println "Remove config drive ISO"
-                shared.run_cmd("""\
-                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
-                """)
-            }
-
-            stage("Generate the model") {
-                def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
-                def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
-                def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
-                def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
-                shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
-            }
-
-            stage("Generate config drive ISO") {
-                SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
-                def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
-                shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
-            }
-
-            stage("Upload Ubuntu image for foundation node") {
-                shared.run_cmd("""\
-                    if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
-                        wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
-                        openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
-                        rm ./${ubuntu_foundation_image_name}
-                    else
-                        echo Image ${ubuntu_foundation_image_name} already exists
-                    fi
-                """)
-            }
-
-            stage("Upload cfg01-day01 and VCP images") {
-                shared.run_cmd("""\
-                    # export OS_IDENTITY_API_VERSION=3
-                    # export OS_AUTH_URL=${OS_AUTH_URL}
-                    # export OS_USERNAME=${OS_USERNAME}
-                    # export OS_PASSWORD=${OS_PASSWORD}
-                    # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
-
-                    openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
-                    openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
-                """)
-            }
-
-            stage("Upload generated config drive ISO into volume on cfg01 node") {
-                shared.run_cmd("""\
-                    # export OS_IDENTITY_API_VERSION=3
-                    # export OS_AUTH_URL=${OS_AUTH_URL}
-                    # export OS_USERNAME=${OS_USERNAME}
-                    # export OS_PASSWORD=${OS_PASSWORD}
-                    # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
-
-                    openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
-                    sleep 3
-                    openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
-                """)
-            }
-
-            stage("Create Heat stack '${ENV_NAME}'") {
-                // Create stack and wait for CREATE_COMPLETED status, manual analog:
-                //    openstack --insecure stack create ${ENV_NAME} \
-                //        --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
-                //        --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
-                //        --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
-                shared.run_cmd("""\
-                    export BOOTSTRAP_TIMEOUT=3600
-                    export ENV_MANAGER=heat
-                    export TEST_GROUP=test_create_environment
-                    export SHUTDOWN_ENV_ON_TEARDOWN=false
-                    export PYTHONIOENCODING=UTF-8
-                    export REPOSITORY_SUITE=${MCP_VERSION}
-                    export UPDATE_VERSION=${UPDATE_VERSION}
-                    export ENV_NAME=${ENV_NAME}
-                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
-                    export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
-                    export LOG_NAME=swarm_test_create_environment.log
-                    py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
-                """)
-            }
-
-            stage("Add the Jenkins slave node") {
-                def jenkins_slave_ip_value_name = "foundation_public_ip"
-                def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
-                def jenkins_slave_executors = 2
-                common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
-                common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
-
-        withCredentials([
-           [$class          : 'UsernamePasswordMultiBinding',
-           credentialsId   : "${CREATE_JENKINS_NODE_CREDENTIALS}",
-           passwordVariable: 'JENKINS_PASS',
-           usernameVariable: 'JENKINS_USER']
-        ]) {
-
-                script_delete_agent = ("""\
-                    CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
-                    curl -w '%{http_code}' -o /dev/null \
-                        -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
-                        -H \"Content-Type:application/x-www-form-urlencoded\" \
-                        -H \"\$CRUMB\" \
-                        \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
-                        --request \'POST\' --data \'\'
-                    sleep 10
-                """)
-
-                script_create_agent = ("""\
-                    CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
-
-                    curl -L -sS -w '%{http_code}' -o /dev/null \
-                        -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
-                        -H \"Content-Type:application/x-www-form-urlencoded\" \
-                        -H \"\$CRUMB\" \
-                        -X POST -d 'json={\
-                            \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
-                            \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
-                            \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
-                            \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
-                            \"labelString\": \"'\"\$ENV_NAME\"'\", \
-                            \"mode\": \"EXCLUSIVE\", \
-                            \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
-                            \"launcher\": {\
-                                \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
-                                \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
-                                \"host\": \"'\"${jenkins_slave_ip}\"'\", \
-                                \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
-                                \"port\": \"'\"22\"'\", \
-                                \"javaPath\": \"\", \
-                                \"jvmOptions\": \"\", \
-                                \"prefixStartSlaveCmd\": \"\", \
-                                \"suffixStartSlaveCmd\": \"\", \
-                                \"launchTimeoutSeconds\": \"\", \
-                                \"maxNumRetries\": \"\", \
-                                \"retryWaitTime\": \"\", \
-                                \"sshHostKeyVerificationStrategy\": {\
-                                    \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
-                                }, \
-                                \"tcpNoDelay\": \"true\"\
-                            }, \
-                            \"retentionStrategy\": {\
-                                \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
-                                \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
-                            }, \
-                            \"nodeProperties\": {\
-                                \"stapler-class-bag\": \"true\"\
-                            }, \
-                            \"type\": \"hudson.slaves.DumbSlave\", \
-                            \"crumb\": \"'\"\$CRUMB\"'\"}' \
-                        \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
-                """)
-                shared.verbose_sh(script_delete_agent, true, false, true)
-                shared.verbose_sh(script_create_agent, true, false, true)
-                timeout(time: 30, unit: 'MINUTES') {
-                    node("${JENKINS_SLAVE_NODE_NAME}") {
-                        sh "echo 'ok'"
-                        println "Jenkins agent is available now and can executes commands"
+                if (env.TCP_QA_REFS) {
+                    stage("Update working dir to patch ${TCP_QA_REFS}") {
+                        shared.update_working_dir()
                     }
                 }
-                // Store jenkins agent IP address
-                jenkins_agent_description = "ssh jenkins@${jenkins_slave_ip}  # foundation node with Jenkins agent <a href=${JENKINS_URL}/computer/${JENKINS_SLAVE_NODE_NAME}>${JENKINS_SLAVE_NODE_NAME}</a><br>ssh root@${SALT_MASTER_IP}  # cfg01 node<br>"
-                writeFile(file: "jenkins_agent_description.txt", text: jenkins_agent_description, encoding: "UTF-8")
 
-        } // withCredentials
+                withCredentials([
+                   [$class          : 'UsernamePasswordMultiBinding',
+                   credentialsId   : env.OS_CREDENTIALS,
+                   passwordVariable: 'OS_PASSWORD',
+                   usernameVariable: 'OS_USERNAME']
+                ]) {
+                    env.OS_IDENTITY_API_VERSION = 3
 
-            }// stage
+                    stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
 
-        } // withCredentials
+                        // delete heat stack
+                        println "Remove heat stack '${ENV_NAME}'"
+                        shared.run_cmd("""\
+                            # export OS_IDENTITY_API_VERSION=3
+                            # export OS_AUTH_URL=${OS_AUTH_URL}
+                            # export OS_USERNAME=${OS_USERNAME}
+                            # export OS_PASSWORD=${OS_PASSWORD}
+                            # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+                            openstack --insecure stack delete -y ${ENV_NAME} || true
+                            timeout 20m /bin/bash -c "while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done"
+                        """)
 
-    } // dir
-} // node
+                        println "Remove config drive ISO"
+                        shared.run_cmd("""\
+                            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+                        """)
+                    }
+
+                    stage("Generate the model") {
+                        def IPV4_NET_ADMIN=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cidr").trim().split().last()
+                        def IPV4_NET_CONTROL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py control_subnet_cidr").trim().split().last()
+                        def IPV4_NET_TENANT=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py tenant_subnet_cidr").trim().split().last()
+                        def IPV4_NET_EXTERNAL=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py external_subnet_cidr").trim().split().last()
+                        shared.generate_cookied_model(IPV4_NET_ADMIN, IPV4_NET_CONTROL, IPV4_NET_TENANT, IPV4_NET_EXTERNAL)
+                    }
+
+                    stage("Generate config drive ISO") {
+                        SALT_MASTER_IP=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_cfg01_ip").trim().split().last()
+                        def ADMIN_NETWORK_GW=shared.run_cmd_stdout("./tcp_tests/utils/get_param_heat_template.py management_subnet_gateway_ip").trim().split().last()
+                        shared.generate_configdrive_iso(SALT_MASTER_IP, ADMIN_NETWORK_GW)
+                    }
+
+                    stage("Upload Ubuntu image for foundation node") {
+                        shared.run_cmd("""\
+                            if ! openstack --insecure image show ${ubuntu_foundation_image_name} -f value -c name; then
+                                wget -O ./${ubuntu_foundation_image_name} https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+                                openstack --insecure image create ${ubuntu_foundation_image_name} --file ./${ubuntu_foundation_image_name} --disk-format qcow2 --container-format bare
+                                rm ./${ubuntu_foundation_image_name}
+                            else
+                                echo Image ${ubuntu_foundation_image_name} already exists
+                            fi
+                        """)
+                    }
+
+                    stage("Upload cfg01-day01 and VCP images") {
+                        shared.run_cmd("""\
+                            # export OS_IDENTITY_API_VERSION=3
+                            # export OS_AUTH_URL=${OS_AUTH_URL}
+                            # export OS_USERNAME=${OS_USERNAME}
+                            # export OS_PASSWORD=${OS_PASSWORD}
+                            # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+                            openstack --insecure image show ${cfg01_day01_image_name} -f value -c name || openstack --insecure image create ${cfg01_day01_image_name} --file ${IMAGE_PATH_CFG01_DAY01} --disk-format qcow2 --container-format bare
+                            openstack --insecure image show ${ubuntu_vcp_image_name} -f value -c name || openstack --insecure image create ${ubuntu_vcp_image_name} --file ${MCP_IMAGE_PATH1604} --disk-format qcow2 --container-format bare
+                        """)
+                    }
+
+                    stage("Upload generated config drive ISO into volume on cfg01 node") {
+                        shared.run_cmd("""\
+                            # export OS_IDENTITY_API_VERSION=3
+                            # export OS_AUTH_URL=${OS_AUTH_URL}
+                            # export OS_USERNAME=${OS_USERNAME}
+                            # export OS_PASSWORD=${OS_PASSWORD}
+                            # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
+
+                            openstack --insecure image delete cfg01.${ENV_NAME}-config-drive.iso || true
+                            sleep 3
+                            openstack --insecure image create cfg01.${ENV_NAME}-config-drive.iso --file /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --disk-format iso --container-format bare
+                        """)
+                    }
+
+                    stage("Create Heat stack '${ENV_NAME}'") {
+                        // Create stack and wait for CREATE_COMPLETED status, manual analog:
+                        //    openstack --insecure stack create ${ENV_NAME} \
+                        //        --template ./tcp_tests/templates/${LAB_CONFIG_NAME}/underlay.hot \
+                        //        --environment ./tcp_tests/templates/_heat_environments/${LAB_PARAM_DEFAULTS} \
+                        //        --parameter env_name=${ENV_NAME} --parameter mcp_version=${MCP_VERSION}
+                        shared.run_cmd("""\
+                            export BOOTSTRAP_TIMEOUT=3600
+                            export ENV_MANAGER=heat
+                            export TEST_GROUP=test_create_environment
+                            export SHUTDOWN_ENV_ON_TEARDOWN=false
+                            export PYTHONIOENCODING=UTF-8
+                            export REPOSITORY_SUITE=${MCP_VERSION}
+                            export UPDATE_VERSION=${UPDATE_VERSION}
+                            export ENV_NAME=${ENV_NAME}
+                            export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                            export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                            export LOG_NAME=swarm_test_create_environment.log
+                            py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+                        """)
+                    }
+
+                    stage("Add the Jenkins slave node") {
+                        def jenkins_slave_ip_value_name = "foundation_public_ip"
+                        def jenkins_slave_ip = shared.run_cmd_stdout("openstack --insecure stack output show ${ENV_NAME} ${jenkins_slave_ip_value_name} -f value -c output_value").trim().split().last()
+                        def jenkins_slave_executors = 2
+                        common.printMsg("JENKINS_SLAVE_NODE_NAME=${JENKINS_SLAVE_NODE_NAME}", "green")
+                        common.printMsg("JENKINS_SLAVE_IP=${jenkins_slave_ip}", "green")
+
+                withCredentials([
+                   [$class          : 'UsernamePasswordMultiBinding',
+                   credentialsId   : "${CREATE_JENKINS_NODE_CREDENTIALS}",
+                   passwordVariable: 'JENKINS_PASS',
+                   usernameVariable: 'JENKINS_USER']
+                ]) {
+
+                        script_delete_agent = ("""\
+                            CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+                            curl -w '%{http_code}' -o /dev/null \
+                                -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+                                -H \"Content-Type:application/x-www-form-urlencoded\" \
+                                -H \"\$CRUMB\" \
+                                \"\${JENKINS_URL}/computer/\${JENKINS_SLAVE_NODE_NAME}/doDelete\" \
+                                --request \'POST\' --data \'\'
+                            sleep 10
+                        """)
+
+                        script_create_agent = ("""\
+                            CRUMB=\$(curl --fail -0 -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \${JENKINS_URL}\'/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)\' 2>/dev/null)
+
+                            curl -L -sS -w '%{http_code}' -o /dev/null \
+                                -u \"\${JENKINS_USER}:\${JENKINS_PASS}\" \
+                                -H \"Content-Type:application/x-www-form-urlencoded\" \
+                                -H \"\$CRUMB\" \
+                                -X POST -d 'json={\
+                                    \"name\": \"'\"\$JENKINS_SLAVE_NODE_NAME\"'\", \
+                                    \"nodeDescription\": \"'\"\$ENV_NAME\"'\", \
+                                    \"numExecutors\": \"'\"${jenkins_slave_executors}\"'\", \
+                                    \"remoteFS\": \"'\"/home/jenkins/workspace\"'\", \
+                                    \"labelString\": \"'\"\$ENV_NAME\"'\", \
+                                    \"mode\": \"EXCLUSIVE\", \
+                                    \"\": [\"hudson.plugins.sshslaves.SSHLauncher\", \"hudson.slaves.RetentionStrategy\$Always\"], \
+                                    \"launcher\": {\
+                                        \"stapler-class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+                                        \"\$class\": \"hudson.plugins.sshslaves.SSHLauncher\", \
+                                        \"host\": \"'\"${jenkins_slave_ip}\"'\", \
+                                        \"credentialsId\": \"'\"\$ACCESS_JENKINS_NODE_CREDENTIALS\"'\", \
+                                        \"port\": \"'\"22\"'\", \
+                                        \"javaPath\": \"\", \
+                                        \"jvmOptions\": \"\", \
+                                        \"prefixStartSlaveCmd\": \"\", \
+                                        \"suffixStartSlaveCmd\": \"\", \
+                                        \"launchTimeoutSeconds\": \"\", \
+                                        \"maxNumRetries\": \"\", \
+                                        \"retryWaitTime\": \"\", \
+                                        \"sshHostKeyVerificationStrategy\": {\
+                                            \"\$class\": \"hudson.plugins.sshslaves.verifiers.NonVerifyingKeyVerificationStrategy\" \
+                                        }, \
+                                        \"tcpNoDelay\": \"true\"\
+                                    }, \
+                                    \"retentionStrategy\": {\
+                                        \"stapler-class\": \"hudson.slaves.RetentionStrategy\$Always\", \
+                                        \"\$class\": \"hudson.slaves.RetentionStrategy\$Always\"\
+                                    }, \
+                                    \"nodeProperties\": {\
+                                        \"stapler-class-bag\": \"true\"\
+                                    }, \
+                                    \"type\": \"hudson.slaves.DumbSlave\", \
+                                    \"crumb\": \"'\"\$CRUMB\"'\"}' \
+                                \"\${JENKINS_URL}/computer/doCreateItem?name=\${JENKINS_SLAVE_NODE_NAME}&type=hudson.slaves.DumbSlave\"
+                        """)
+                        shared.verbose_sh(script_delete_agent, true, false, true)
+                        shared.verbose_sh(script_create_agent, true, false, true)
+                        timeout(time: 30, unit: 'MINUTES') {
+                            node("${JENKINS_SLAVE_NODE_NAME}") {
+                                sh "echo 'ok'"
+                                println "Jenkins agent is available now and can executes commands"
+                            }
+                        }
+                        // Store jenkins agent IP address
+                        jenkins_agent_description = "ssh jenkins@${jenkins_slave_ip}  # foundation node with Jenkins agent <a href=${JENKINS_URL}/computer/${JENKINS_SLAVE_NODE_NAME}>${JENKINS_SLAVE_NODE_NAME}</a><br>ssh root@${SALT_MASTER_IP}  # cfg01 node<br>"
+                        writeFile(file: "jenkins_agent_description.txt", text: jenkins_agent_description, encoding: "UTF-8")
+
+                } // withCredentials
+
+                    }// stage
+
+                } // withCredentials
+
+            } // dir
+        } // node
 
 
-node ("${JENKINS_SLAVE_NODE_NAME}") {
-    dir("${PARENT_WORKSPACE}") {
+        node ("${JENKINS_SLAVE_NODE_NAME}") {
+            dir("${PARENT_WORKSPACE}") {
 
-        stage("Clean the environment and clone tcp-qa") {
-            deleteDir()
-            shared.verbose_sh("""\
-                [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv /home/jenkins/venv_testrail_reporter
-            """, true, false, true)
-            shared.run_cmd("""\
-                . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U
-            """)
-            shared.verbose_sh("""\
-                [ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
-            """, true, false, true)
-            shared.run_cmd("""\
-                git clone https://gerrit.mcp.mirantis.com/mcp/tcp-qa ${PARENT_WORKSPACE}
-            """)
-            shared.update_working_dir()
-        }
-
-        withCredentials([
-           [$class          : 'UsernamePasswordMultiBinding',
-           credentialsId   : env.OS_CREDENTIALS,
-           passwordVariable: 'OS_PASSWORD',
-           usernameVariable: 'OS_USERNAME']
-        ]) {
-
-
-            stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
-                def xml_report_name = "deploy_salt.xml"
-                try {
-                    // deploy_salt.xml
-                    shared.run_sh("""\
-                        export ENV_NAME=${ENV_NAME}
-                        export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
-                        export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
-                        export ENV_MANAGER=heat
-                        export SHUTDOWN_ENV_ON_TEARDOWN=false
-                        export BOOTSTRAP_TIMEOUT=3600
-                        export PYTHONIOENCODING=UTF-8
-                        export REPOSITORY_SUITE=${MCP_VERSION}
-                        export UPDATE_VERSION=${UPDATE_VERSION}
-                        export TEST_GROUP=test_bootstrap_salt
-                        export LOG_NAME=swarm_test_bootstrap_salt.log
-                        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+                stage("Clean the environment and clone tcp-qa") {
+                    deleteDir()
+                    shared.verbose_sh("""\
+                        [ -d /home/jenkins/venv_testrail_reporter ] || virtualenv /home/jenkins/venv_testrail_reporter
+                    """, true, false, true)
+                    shared.run_cmd("""\
+                        . /home/jenkins/venv_testrail_reporter/bin/activate; pip install git+https://github.com/dis-xcom/testrail_reporter -U
                     """)
-                    // Wait for jenkins to start and IO calm down
-                    sleep(60)
-
-                } catch (e) {
-                      common.printMsg("Saltstack cluster deploy is failed", "purple")
-                      if (fileExists(xml_report_name)) {
-                          shared.download_logs("deploy_salt_${ENV_NAME}")
-                          def String junit_report_xml = readFile(xml_report_name)
-                          def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
-                          throw new Exception(junit_report_xml_pretty)
-                      } else {
-                          throw e
-                      }
-                } finally {
-                    // TODO(ddmitriev): add checks for salt cluster
+                    shared.verbose_sh("""\
+                        [ -d /home/jenkins/fuel-devops30 ] || virtualenv /home/jenkins/fuel-devops30
+                    """, true, false, true)
+                    shared.run_cmd("""\
+                        git clone https://gerrit.mcp.mirantis.com/mcp/tcp-qa ${PARENT_WORKSPACE}
+                    """)
+                    shared.update_working_dir()
                 }
-            } // stage
-        } // withCredentials
-    } // dir
-} // node
+
+                withCredentials([
+                   [$class          : 'UsernamePasswordMultiBinding',
+                   credentialsId   : env.OS_CREDENTIALS,
+                   passwordVariable: 'OS_PASSWORD',
+                   usernameVariable: 'OS_USERNAME']
+                ]) {
+
+
+                    stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+                        def xml_report_name = "deploy_salt.xml"
+                        try {
+                            // deploy_salt.xml
+                            shared.run_sh("""\
+                                export ENV_NAME=${ENV_NAME}
+                                export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                                export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                                export ENV_MANAGER=heat
+                                export SHUTDOWN_ENV_ON_TEARDOWN=false
+                                export BOOTSTRAP_TIMEOUT=3600
+                                export PYTHONIOENCODING=UTF-8
+                                export REPOSITORY_SUITE=${MCP_VERSION}
+                                export UPDATE_VERSION=${UPDATE_VERSION}
+                                export TEST_GROUP=test_bootstrap_salt
+                                export LOG_NAME=swarm_test_bootstrap_salt.log
+                                py.test -vvv -s -p no:django -p no:ipdb --junit-xml=${xml_report_name} -k \${TEST_GROUP}
+                            """)
+                            // Wait for jenkins to start and IO calm down
+                            sleep(60)
+
+                        } catch (e) {
+                              common.printMsg("Saltstack cluster deploy is failed", "purple")
+                              if (fileExists(xml_report_name)) {
+                                  shared.download_logs("deploy_salt_${ENV_NAME}")
+                                  def String junit_report_xml = readFile(xml_report_name)
+                                  def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
+                                  throw new Exception(junit_report_xml_pretty)
+                              } else {
+                                  throw e
+                              }
+                        } finally {
+                            // TODO(ddmitriev): add checks for salt cluster
+                        }
+                    } // stage
+                } // withCredentials
+            } // dir
+        } // node
+    } // timestamps
 } // timeout
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 6014b9e..0cdc33f 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -30,73 +30,74 @@
 def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
 
 timeout(time: install_timeout + 600, unit: 'SECONDS') {
-
-    node ("${PARENT_NODE_NAME}") {
-        if (! fileExists("${PARENT_WORKSPACE}")) {
-            error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
-        }
-        dir("${PARENT_WORKSPACE}") {
-
-            if (! env.STACK_INSTALL) {
-                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+    timestamps {
+        node ("${PARENT_NODE_NAME}") {
+            if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
             }
+            dir("${PARENT_WORKSPACE}") {
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
-            try {
-                // Install core and cicd
-                stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
-                    shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
-                }
-                stage("Create env_jenkins_cicd and env_k8s files") {
-                    shared.run_cmd("""\
-                        export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
-                        python ./tcp_tests/utils/create_env_jenkins_cicd.py
-                    """)
+                if (! env.STACK_INSTALL) {
+                    error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
                 }
 
-                for (stack in "${env.STACK_INSTALL}".split(",")) {
-                    stage("Sanity check the deployed component [${stack}]") {
-                        shared.sanity_check_component(stack)
+                if (env.TCP_QA_REFS) {
+                    stage("Update working dir to patch ${TCP_QA_REFS}") {
+                        shared.update_working_dir()
+                    }
+                }
 
-                        // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
-                        // then make a copy for the created snapshot to allow the system
-                        // tests to revert this snapshot along with the metadata from the INI file.
+                try {
+                    // Install core and cicd
+                    stage("Run Jenkins job on salt-master [deploy_openstack:${env.STACK_INSTALL}]") {
+                        shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
+                    }
+                    stage("Create env_jenkins_cicd and env_k8s files") {
                         shared.run_cmd("""\
-                            if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
-                                cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
-                            fi
+                            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+                            python ./tcp_tests/utils/create_env_jenkins_cicd.py
                         """)
                     }
-                } // for
 
-                if (make_snapshot_stages) {
-                    stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
-                        shared.devops_snapshot(env.STACK_INSTALL)
+                    for (stack in "${env.STACK_INSTALL}".split(",")) {
+                        stage("Sanity check the deployed component [${stack}]") {
+                            shared.sanity_check_component(stack)
+
+                            // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
+                            // then make a copy for the created snapshot to allow the system
+                            // tests to revert this snapshot along with the metadata from the INI file.
+                            shared.run_cmd("""\
+                                if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
+                                    cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
+                                fi
+                            """)
+                        }
+                    } // for
+
+                    if (make_snapshot_stages) {
+                        stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
+                            shared.devops_snapshot(env.STACK_INSTALL)
+                        }
+                    }
+
+                } catch (e) {
+                    common.printMsg("Job is failed", "purple")
+                    shared.download_logs("deploy_drivetrain_${ENV_NAME}")
+                    throw e
+                } finally {
+                    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                    // and report appropriate data to TestRail
+                    // TODO(ddmitriev): add checks for cicd cluster
+                    if (make_snapshot_stages) {
+                        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                            shared.run_cmd("""\
+                                dos.py destroy ${ENV_NAME}
+                            """)
+                        }
                     }
                 }
 
-            } catch (e) {
-                common.printMsg("Job is failed", "purple")
-                shared.download_logs("deploy_drivetrain_${ENV_NAME}")
-                throw e
-            } finally {
-                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-                // and report appropriate data to TestRail
-                // TODO(ddmitriev): add checks for cicd cluster
-                if (make_snapshot_stages) {
-                    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                        shared.run_cmd("""\
-                            dos.py destroy ${ENV_NAME}
-                        """)
-                    }
-                }
-            }
-
-        } // dir
-    } // node
+            } // dir
+        } // node
+    } // timestamps
 } // timeout
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index b8afc35..7377958 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -30,67 +30,68 @@
 def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
 
 timeout(time: install_timeout + 600, unit: 'SECONDS') {
-
-    node ("${PARENT_NODE_NAME}") {
-        if (! fileExists("${PARENT_WORKSPACE}")) {
-            error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
-        }
-        dir("${PARENT_WORKSPACE}") {
-
-            if (! env.STACK_INSTALL) {
-                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+    timestamps {
+        node ("${PARENT_NODE_NAME}") {
+            if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
             }
+            dir("${PARENT_WORKSPACE}") {
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
-            try {
-                // Install the cluster
-                stage("Run Jenkins job on CICD [deploy_openstack:${env.STACK_INSTALL}]") {
-                    shared.run_job_on_cicd_nodes(env.STACK_INSTALL, install_timeout)
+                if (! env.STACK_INSTALL) {
+                    error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
                 }
 
-                for (stack in "${env.STACK_INSTALL}".split(",")) {
-                    stage("Sanity check the deployed component [${stack}]") {
-                        shared.sanity_check_component(stack)
-
-                        // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
-                        // then make a copy for the created snapshot to allow the system
-                        // tests to revert this snapshot along with the metadata from the INI file.
-                        shared.run_cmd("""\
-                            if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
-                                cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
-                            fi
-                        """)
-                    }
-                } // for
-
-                if (make_snapshot_stages) {
-                    stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
-                        shared.devops_snapshot(env.STACK_INSTALL)
+                if (env.TCP_QA_REFS) {
+                    stage("Update working dir to patch ${TCP_QA_REFS}") {
+                        shared.update_working_dir()
                     }
                 }
 
-            } catch (e) {
-                common.printMsg("Job is failed", "purple")
-                shared.download_logs("deploy_platform_${ENV_NAME}")
-                throw e
-            } finally {
-                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-                // and report appropriate data to TestRail
-                // TODO(ddmitriev): add checks for the installed stacks
-                if (make_snapshot_stages) {
-                    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                        shared.run_cmd("""\
-                            dos.py destroy ${ENV_NAME}
-                        """)
+                try {
+                    // Install the cluster
+                    stage("Run Jenkins job on CICD [deploy_openstack:${env.STACK_INSTALL}]") {
+                        shared.run_job_on_cicd_nodes(env.STACK_INSTALL, install_timeout)
+                    }
+
+                    for (stack in "${env.STACK_INSTALL}".split(",")) {
+                        stage("Sanity check the deployed component [${stack}]") {
+                            shared.sanity_check_component(stack)
+
+                            // If oslo_config INI file ${ENV_NAME}_salt_deployed.ini exists,
+                            // then make a copy for the created snapshot to allow the system
+                            // tests to revert this snapshot along with the metadata from the INI file.
+                            shared.run_cmd("""\
+                                if [ -f \$(pwd)/${ENV_NAME}_salt_deployed.ini ]; then
+                                    cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
+                                fi
+                            """)
+                        }
+                    } // for
+
+                    if (make_snapshot_stages) {
+                        stage("Make environment snapshots for [${env.STACK_INSTALL}]") {
+                            shared.devops_snapshot(env.STACK_INSTALL)
+                        }
+                    }
+
+                } catch (e) {
+                    common.printMsg("Job is failed", "purple")
+                    shared.download_logs("deploy_platform_${ENV_NAME}")
+                    throw e
+                } finally {
+                    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                    // and report appropriate data to TestRail
+                    // TODO(ddmitriev): add checks for the installed stacks
+                    if (make_snapshot_stages) {
+                        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                            shared.run_cmd("""\
+                                dos.py destroy ${ENV_NAME}
+                            """)
+                        }
                     }
                 }
-            }
 
-        } // dir
-    } // node
+            } // dir
+        } // node
+    } // timestamps
 } // timeout
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index f7ad7c3..1ebee5a 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -12,10 +12,6 @@
  *   PARENT_WORKSPACE              Path to the workspace of the parent job to use tcp-qa repo
  *   TCP_QA_REFS                   Reference to the tcp-qa change on Gerrit, like refs/changes/46/418546/41
  *   SHUTDOWN_ENV_ON_TEARDOWN      optional, shutdown fuel-devops environment at the end of the job
- *   LAB_CONFIG_NAME               Not used (backward compatibility, for manual deployment steps only)
- *   REPOSITORY_SUITE              Not used (backward compatibility, for manual deployment steps only)
- *   MCP_IMAGE_PATH1604            Not used (backward compatibility, for manual deployment steps only)
- *   IMAGE_PATH_CFG01_DAY01        Not used (backward compatibility, for manual deployment steps only)
  *   TEMPEST_IMAGE_VERSION         Tempest image version: pike by default, can be queens.
  *   TEMPEST_TARGET                Node where tempest will be run
  *   MAKE_SNAPSHOT_STAGES          optional, use "dos.py snapshot" to snapshot stages
@@ -34,85 +30,87 @@
 currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
 
 timeout(time: 10, unit: 'HOURS') {
-node ("${PARENT_NODE_NAME}") {
-    if (! fileExists("${PARENT_WORKSPACE}")) {
-        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
-    }
-    dir("${PARENT_WORKSPACE}") {
-        try {
-
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
+    timestamps {
+        node ("${PARENT_NODE_NAME}") {
+            if (! fileExists("${PARENT_WORKSPACE}")) {
+                error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
             }
+            dir("${PARENT_WORKSPACE}") {
+                try {
 
-            stage("Run tests") {
-                def steps = shared.get_steps_list(PASSED_STEPS)
-                def sources = """\
-                    cd ${PARENT_WORKSPACE}
-                    export ENV_NAME=${ENV_NAME}
-                    . ./tcp_tests/utils/env_salt"""
-                if (steps.contains('k8s')) {
-                    sources += """
-                    . ./tcp_tests/utils/env_k8s\n"""
-                }
-                if (steps.contains('openstack')) {
-                    sources += """
-                    export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
-                    export TEMPEST_TARGET=${TEMPEST_TARGET}
-                    # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
-                }
-                def installed = steps.collect {"""\
-                    export ${it}_installed=true"""}.join("\n")
-
-                shared.run_sh(sources + installed + """
-                    export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
-                    export ENV_MANAGER=$ENV_MANAGER  # use 'hardware' fixture to manage fuel-devops environment
-                    export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
-                    export salt_master_port=6969
-                    export SALT_USER=\$SALTAPI_USER
-                    export SALT_PASSWORD=\$SALTAPI_PASS
-
-                    export LOG_NAME=swarm_run_pytest.log
-                    py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
-
-                    """)
-
-                def snapshot_name = "test_completed"
-                shared.download_logs("test_completed_${ENV_NAME}")
-
-                if (make_snapshot_stages) {
-                    shared.run_cmd("""\
-                        dos.py suspend ${ENV_NAME}
-                        dos.py snapshot ${ENV_NAME} ${snapshot_name}
-                    """)
-                    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
-                        shared.run_cmd("""\
-                            dos.py resume ${ENV_NAME}
-                        """)
+                    if (env.TCP_QA_REFS) {
+                        stage("Update working dir to patch ${TCP_QA_REFS}") {
+                            shared.update_working_dir()
+                        }
                     }
-                    shared.devops_snapshot_info(snapshot_name)
-                }
-            }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            // Downloading logs usually not needed here
-            // because tests should use the decorator @pytest.mark.grab_versions
-            // shared.download_logs("test_failed_${ENV_NAME}")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            if (make_snapshot_stages) {
-                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                    shared.run_cmd("""\
-                        dos.py destroy ${ENV_NAME}
-                    """)
+                    stage("Run tests") {
+                        def steps = shared.get_steps_list(PASSED_STEPS)
+                        def sources = """\
+                            cd ${PARENT_WORKSPACE}
+                            export ENV_NAME=${ENV_NAME}
+                            . ./tcp_tests/utils/env_salt"""
+                        if (steps.contains('k8s')) {
+                            sources += """
+                            . ./tcp_tests/utils/env_k8s\n"""
+                        }
+                        if (steps.contains('openstack')) {
+                            sources += """
+                            export TEMPEST_IMAGE_VERSION=${TEMPEST_IMAGE_VERSION}
+                            export TEMPEST_TARGET=${TEMPEST_TARGET}
+                            # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
+                        }
+                        def installed = steps.collect {"""\
+                            export ${it}_installed=true"""}.join("\n")
+
+                        shared.run_sh(sources + installed + """
+                            export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
+                            export ENV_MANAGER=$ENV_MANAGER  # use 'hardware' fixture to manage fuel-devops environment
+                            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+                            export salt_master_port=6969
+                            export SALT_USER=\$SALTAPI_USER
+                            export SALT_PASSWORD=\$SALTAPI_PASS
+
+                            export LOG_NAME=swarm_run_pytest.log
+                            py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+                            """)
+
+                        def snapshot_name = "test_completed"
+                        shared.download_logs("test_completed_${ENV_NAME}")
+
+                        if (make_snapshot_stages) {
+                            shared.run_cmd("""\
+                                dos.py suspend ${ENV_NAME}
+                                dos.py snapshot ${ENV_NAME} ${snapshot_name}
+                            """)
+                            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+                                shared.run_cmd("""\
+                                    dos.py resume ${ENV_NAME}
+                                """)
+                            }
+                            shared.devops_snapshot_info(snapshot_name)
+                        }
+                    }
+
+                } catch (e) {
+                    common.printMsg("Job is failed", "purple")
+                    // Downloading logs usually not needed here
+                    // because tests should use the decorator @pytest.mark.grab_versions
+                    // shared.download_logs("test_failed_${ENV_NAME}")
+                    throw e
+                } finally {
+                    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                    // and report appropriate data to TestRail
+                    if (make_snapshot_stages) {
+                        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                            shared.run_cmd("""\
+                                dos.py destroy ${ENV_NAME}
+                            """)
+                        }
+                    }
                 }
             }
-        }
-    }
-}
+        } //node
+    } // timestamps
 } // timeout
diff --git a/jobs/project.yaml b/jobs/project.yaml
index 84dd470..1250a84 100644
--- a/jobs/project.yaml
+++ b/jobs/project.yaml
@@ -3,31 +3,48 @@
 - project:
     name: sre
     jobs:
-      - heat-cicd-pike-dvr-sl
-      - heat-cicd-queens-contrail41-sl
-      - heat-cicd-queens-dvr-sl
+      - heat-cicd-pike-dvr-sl:
+          disabled: false
+          trigger_time: H(5-30) 22 * * 1-7
+      - heat-cicd-queens-contrail41-sl:
+          disabled: false
+          trigger_time: H(5-30) 23 * * 1-7
+      - heat-cicd-queens-dvr-sl:
+          disabled: false
+          trigger_time: H(5-30) 21 * * 1-7
       # ------- BM jobs -------
-      - deploy_bm
+      - deploy_bm:
+          disabled: false
+          trigger_time: H(40-59) 0 * * 1-5
       - bm-cicd-pike-ovs-maas
       - bm-cicd-queens-ovs-maas
       - heat-bm-cicd-pike-contrail-sl
       - heat-bm-cicd-queens-contrail-sl
       - released-bm-pike-ovs
       # --- Released envs ------
-      - deploy-released
+      - deploy-released:
+          disabled: false
+          trigger_time: 0 18 * * 0-4
       - released-heat-cicd-pike-dvr-sl
       - released-heat-cicd-pike-contrail41-sl
       - released-heat-cicd-queens-dvr-sl
       - released-heat-cicd-queens-contrail41-sl
       # ----- Release Artifacts jobs ---
-      - 2019.2.0-heat-cicd-pike-dvr-sl
-      - 2019.2.0-heat-cicd-queens-contrail41-sl
-      - 2019.2.0-heat-cicd-queens-dvr-sl
+      - 2019.2.0-heat-cicd-pike-dvr-sl:
+          disabled: true
+          trigger_time: H(5-30) 22 * * 1-7
+      - 2019.2.0-heat-cicd-queens-contrail41-sl:
+          disabled: true
+          trigger_time: H(5-30) 23 * * 1-7
+      - 2019.2.0-heat-cicd-queens-dvr-sl:
+          disabled: true
+          trigger_time: H(5-30) 21 * * 1-7
       # - show_networks_used_by_libvirt - can't moved to JJB, node parameter is not supported
       - maintenance-heat-cicd-pike-dvr-sl
       - download-config-drive
       - swarm-deploy-cicd
       - swarm-deploy-platform
+      - swarm-run-pytest
       # - generate-report - can't moved to JJB, a lot of parameters is not supported like Inject environment variables
       # - mark-reports - can't moved to JJB, a lot of parameters is not supported like Inject environment variables
       - cookied-cicd-queens-dvr-sl-heat
diff --git a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
index 64aaaa3..4a8ebfd 100644
--- a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
@@ -2,10 +2,11 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: false
-    disabled: true
     name: 2019.2.0-heat-cicd-pike-dvr-sl
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-       - timed: H(5-30) 22 * * 1-7
+       - timed: '{obj:trigger_time}'
     parameters:
     - string:
         default: heat-cicd-pike-dvr-sl
@@ -35,7 +36,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: '{current-version}'
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -72,29 +73,29 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{current-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{current-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -173,7 +174,7 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
@@ -183,12 +184,12 @@
         name: TEMPEST_PATTERN
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: '{current-version}'
         description: 'for formula repo http://mirror.mirantis.com/update/UPDATE_VERSION/salt-formulas/xenial'
         name: UPDATE_VERSION
         trim: 'false'
@@ -196,6 +197,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
index 710593a..4da28d4 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
@@ -2,10 +2,11 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: false
-    disabled: true
     name: 2019.2.0-heat-cicd-queens-contrail41-sl
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-    - timed: H(5-30) 23 * * 1-7
+    - timed: '{obj:trigger_time}'
     parameters:
     - string:
         default: heat-cicd-queens-contrail41-sl
@@ -35,7 +36,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: '{current-version}'
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -72,29 +73,29 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{current-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{current-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -173,17 +174,17 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: '{current-version}'
         description: ''
         name: UPDATE_VERSION
         trim: 'false'
@@ -201,6 +202,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
index 88e4202..2374cb4 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
@@ -2,10 +2,11 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: false
-    disabled: true
     name: 2019.2.0-heat-cicd-queens-dvr-sl
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-    - timed: H(5-30) 21 * * 1-7
+    - timed: '{obj:trigger_time}'
     parameters:
     - string:
         default: heat-cicd-queens-dvr-sl
@@ -35,7 +36,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{current-version}"
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -72,23 +73,23 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{current-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{current-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
@@ -173,7 +174,7 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
@@ -183,12 +184,12 @@
         name: TEMPEST_PATTERN
         trim: 'false'
     - string:
-        default: 'refs/tags/2019.2.8'
+        default: refs/tags/{current-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{current-version}"
         description: 'for formula repo http://mirror.mirantis.com/update/UPDATE_VERSION/salt-formulas/xenial'
         name: UPDATE_VERSION
         trim: 'false'
@@ -201,6 +202,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/bm-cicd-pike-ovs-maas.yml b/jobs/templates/bm-cicd-pike-ovs-maas.yml
index 6b7c1db..694630c 100644
--- a/jobs/templates/bm-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-pike-ovs-maas.yml
@@ -93,7 +93,7 @@
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -196,6 +196,10 @@
         default: true
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: true
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/bm-cicd-queens-ovs-maas.yml b/jobs/templates/bm-cicd-queens-ovs-maas.yml
index 6166a66..ad40bdf 100644
--- a/jobs/templates/bm-cicd-queens-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-queens-ovs-maas.yml
@@ -92,7 +92,7 @@
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -208,6 +208,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/deploy-released.yml b/jobs/templates/deploy-released.yml
index 2d809d9..ac17ec2 100644
--- a/jobs/templates/deploy-released.yml
+++ b/jobs/templates/deploy-released.yml
@@ -2,10 +2,11 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: false
-    disabled: false
     sandbox: true
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-    - timed: H(20-30) 1 * * 1-5
+    - timed: '{obj:trigger_time}'
     name: deploy-released
     parameters:
     - text:
diff --git a/jobs/templates/deploy_bm.yml b/jobs/templates/deploy_bm.yml
index 6814972..94a406c 100644
--- a/jobs/templates/deploy_bm.yml
+++ b/jobs/templates/deploy_bm.yml
@@ -2,10 +2,11 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: false
-    disabled: false
     sandbox: true
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-    - timed: H(40-59) 0 * * 1-5
+    - timed: '{obj:trigger_time}'
     name: deploy_bm
     parameters:
     - string:
diff --git a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
index af9f64b..3d9c175 100644
--- a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
@@ -94,7 +94,7 @@
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -221,6 +221,10 @@
         default: true
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: true
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
index 87562d8..2c0de61 100644
--- a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
@@ -95,7 +95,7 @@
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -222,6 +222,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-cicd-pike-dvr-sl.yml b/jobs/templates/heat-cicd-pike-dvr-sl.yml
index 4df86a6..f3baf7e 100644
--- a/jobs/templates/heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-pike-dvr-sl.yml
@@ -2,11 +2,12 @@
 - job-template:
     project-type: pipeline
     concurrent: true
-    disabled: false
     description: '{job-description}'
     name: heat-cicd-pike-dvr-sl
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-       - timed: H(5-30) 22 * * 1-7
+    - timed: '{obj:trigger_time}'
     properties:
     - build-discarder:
         days-to-keep: 60
@@ -99,7 +100,7 @@
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -201,6 +202,10 @@
         default: true
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: true
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-cicd-queens-contrail41-sl.yml b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
index 724439a..71f1f6a 100644
--- a/jobs/templates/heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
@@ -3,10 +3,11 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: true
-    disabled: false
     name: heat-cicd-queens-contrail41-sl
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-    - timed: H(5-30) 23 * * 1-7
+    - timed: '{obj:trigger_time}'
     parameters:
     - string:
         default: heat-cicd-queens-contrail41-sl
@@ -92,7 +93,7 @@
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -199,6 +200,10 @@
         default: true
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: true
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-cicd-queens-dvr-sl.yml b/jobs/templates/heat-cicd-queens-dvr-sl.yml
index e58a4fb..b0afc13 100644
--- a/jobs/templates/heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-queens-dvr-sl.yml
@@ -2,11 +2,12 @@
 - job-template:
     project-type: pipeline
     concurrent: true
-    disabled: false
     name: heat-cicd-queens-dvr-sl
     description: '{job-description}'
+    # The 'disabled' and 'timed' options are defined in project.yaml
+    disabled: '{obj:disabled}'
     triggers:
-    - timed: H(5-30) 21 * * 1-7
+    - timed: '{obj:trigger_time}'
     parameters:
     - string:
         default: heat-cicd-queens-dvr-sl
@@ -200,6 +201,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/maintenance-heat-cicd-pike-dvr-sl.yml b/jobs/templates/maintenance-heat-cicd-pike-dvr-sl.yml
index 7e947a6..7fd3f3b 100644
--- a/jobs/templates/maintenance-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/maintenance-heat-cicd-pike-dvr-sl.yml
@@ -196,6 +196,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/released-bm-pike-ovs.yml b/jobs/templates/released-bm-pike-ovs.yml
index dbb8f1d..4c96960 100644
--- a/jobs/templates/released-bm-pike-ovs.yml
+++ b/jobs/templates/released-bm-pike-ovs.yml
@@ -33,7 +33,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.0
+        default: "{previous-version}"
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -63,36 +63,36 @@
         name: ENV_NAME
         trim: 'false'
     - string:
-        default: refs/heads/2019.2.0
+        default: refs/tags/{previous-version}
         description: |-
           Example: refs/changes/89/411189/36
           (for now - only one reference allowed)
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: refs/heads/release/2019.2.0
+        default: refs/tags/{previous-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: refs/heads/release/2019.2.0
+        default: refs/tags/{previous-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: 2019.2.0
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: 2019.2.0
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
     - string:
-        default: -m "run_cvp_func_sanity|run_tempest|run_stacklight"
+        default: -m "run_cvp_func_sanity|run_cvp_tempest|run_stacklight"
         description: |-
           Pytest option -k or -m, with expression to select necessary tests.
           Additional pytest options are allowed.
@@ -108,7 +108,7 @@
         name: COOKIECUTTER_REF_CHANGE
         trim: 'false'
     - string:
-        default: refs/heads/2019.2.0
+        default: refs/tags/{previous-version}
         description: ''
         name: ENVIRONMENT_TEMPLATE_REF_CHANGE
         trim: 'false'
@@ -161,12 +161,12 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: refs/heads/release/2019.2.0
+        default: refs/tags/{previous-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
     - string:
-        default: refs/heads/release/2019.2.0
+        default: refs/tags/{previous-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
@@ -185,6 +185,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
index ea5943d..e3caf3b 100644
--- a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
@@ -33,7 +33,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -70,23 +70,23 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
@@ -189,17 +189,17 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: 'for formula repo http://mirror.mirantis.com/update/UPDATE_VERSION/salt-formulas/xenial'
         name: UPDATE_VERSION
         trim: 'false'
@@ -212,6 +212,10 @@
         default: true
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: true
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
index 7f60422..a7db0c8 100644
--- a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
@@ -33,7 +33,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -70,23 +70,23 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
@@ -188,7 +188,7 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
@@ -198,12 +198,12 @@
         name: TEMPEST_PATTERN
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: 'for formula repo http://mirror.mirantis.com/update/UPDATE_VERSION/salt-formulas/xenial'
         name: UPDATE_VERSION
         trim: 'false'
@@ -211,6 +211,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
index 7775830..41e57d3 100644
--- a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
@@ -33,7 +33,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -70,22 +70,22 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}} value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}} value
         name: SALT_MODELS_SYSTEM_COMMIT
         trim: 'false'
@@ -186,17 +186,17 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: 'for formula repo http://mirror.mirantis.com/update/UPDATE_VERSION/salt-formulas/xenial'
         name: UPDATE_VERSION
         trim: 'false'
@@ -214,6 +214,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
index bbc1114..eab85d8 100644
--- a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
@@ -33,7 +33,7 @@
         name: PLATFORM_STACK_INSTALL_TIMEOUT
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: ''
         name: MCP_VERSION
         trim: 'false'
@@ -70,23 +70,23 @@
         name: TCP_QA_REFS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in pipeline-library
         name: PIPELINE_LIBRARY_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: reference to patchset in mk-pipelines
         name: MK_PIPELINES_REF
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: COOKIECUTTER_TEMPLATE_COMMIT
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: Can be 'master' or 'proposed'. If empty, then takes ${{MCP_VERSION}}
           value
         name: SALT_MODELS_SYSTEM_COMMIT
@@ -188,7 +188,7 @@
         name: LAB_PARAM_DEFAULTS
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: JENKINS_PIPELINE_BRANCH
         trim: 'false'
@@ -198,12 +198,12 @@
         name: TEMPEST_PATTERN
         trim: 'false'
     - string:
-        default: refs/tags/2019.2.8
+        default: refs/tags/{previous-version}
         description: ''
         name: MCP_COMMON_SCRIPTS_REFS
         trim: 'false'
     - string:
-        default: 2019.2.8
+        default: "{previous-version}"
         description: 'for formula repo http://mirror.mirantis.com/update/UPDATE_VERSION/salt-formulas/xenial'
         name: UPDATE_VERSION
         trim: 'false'
@@ -211,6 +211,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/swarm-deploy-cicd.yml b/jobs/templates/swarm-deploy-cicd.yml
index 65e77fe..e999b96 100644
--- a/jobs/templates/swarm-deploy-cicd.yml
+++ b/jobs/templates/swarm-deploy-cicd.yml
@@ -56,6 +56,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/swarm-deploy-platform.yml b/jobs/templates/swarm-deploy-platform.yml
index 1405ac4..7971c41 100644
--- a/jobs/templates/swarm-deploy-platform.yml
+++ b/jobs/templates/swarm-deploy-platform.yml
@@ -57,6 +57,10 @@
         default: false
         description: Whether to perform dist-upgrade on virtual nodes during deployment
         name: DIST_UPGRADE_NODES
+    - bool:
+        default: false
+        description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
+        name: UPGRADE_SALTSTACK
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/swarm-run-pytest.yml b/jobs/templates/swarm-run-pytest.yml
new file mode 100644
index 0000000..36d9380
--- /dev/null
+++ b/jobs/templates/swarm-run-pytest.yml
@@ -0,0 +1,86 @@
+- job-template:
+    project-type: pipeline
+    description: '{job-description}'
+    concurrent: true
+    disabled: false
+    name: swarm-run-pytest
+    parameters:
+    - string:
+        default: ''
+        description: 'Required: Fuel-devops environment name'
+        name: ENV_NAME
+        trim: 'false'
+    - string:
+        default: ''
+        description: 'Example: refs/changes/89/411189/36
+                       (for now - only one reference allowed)'
+        name: TCP_QA_REFS
+        trim: 'false'
+    - string:
+        default: ''
+        description: 'Required: Name of the jenkins slave to create the environment
+                      To be set by the parent deployment job.'
+        name: PARENT_NODE_NAME
+        trim: 'false'
+    - string:
+        default: ''
+        description: 'Completed steps to install components on the environment.
+                      If tests require some additional components, it may be installed in
+                      appropriate fixtures, so set the PASSED_STEPS correctly for the
+                      testing environment.'
+        name: PASSED_STEPS
+        trim: 'false'
+    - string:
+        default: ''
+        description: 'Required: Workspace on the jenkins slave to reuse for the job
+                      To be set by the parent deployment job.'
+        name: PARENT_WORKSPACE
+        trim: 'false'
+    - bool:
+        default: false
+        description: 'Shutdown the fuel-devops environment at the end of the job'
+        name: SHUTDOWN_ENV_ON_TEARDOWN
+        trim: 'false'
+    - string:
+        default: ''
+        name: ENV_MANAGER
+    - bool:
+        default: false
+        description: 'Use "dos.py snapshot" to snapshot stages of deploy if ENV_MANAGER is devops'
+        name: MAKE_SNAPSHOT_STAGES
+    - text:
+        default: ''
+        description: |-
+          Pytest option -k or -m, with expression to select necessary tests.
+          Additional pytest options are allowed.
+        name: RUN_TEST_OPTS
+        trim: 'false'
+    - string:
+        default: pike
+        description: ''
+        name: TEMPEST_IMAGE_VERSION
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: TEMPEST_TARGET
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: TEMPEST_PATTERN
+        trim: 'false'
+    - string:
+        default: ''
+        description: ''
+        name: TEMPEST_EXTRA_ARGS
+        trim: 'false'
+    pipeline-scm:
+      lightweight-checkout: false
+      scm:
+      - git:
+          branches:
+          - FETCH_HEAD
+          refspec: ${{TCP_QA_REFS}}
+          url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa.git
+      script-path: jobs/pipelines/swarm-run-pytest.groovy
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index d6f4ed4..9edd241 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -162,7 +162,7 @@
                     export OS_PROJECT_NAME=${OS_PROJECT_NAME}
                     export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME}
                     openstack --insecure stack delete -y ${ENV_NAME} || true
-                    while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+                    timeout 20m /bin/bash -c "while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done"
                 """)
             }
 
@@ -314,7 +314,7 @@
         build_pipeline_job('swarm-bootstrap-salt-cluster-heat', parameters)
 }
 
-def swarm_deploy_cicd(String stack_to_install, String install_timeout, String jenkins_slave_node_name, Boolean make_snapshot_stages, String batch_size, Boolean dist_upgrade_nodes) {
+def swarm_deploy_cicd(String stack_to_install, String install_timeout, String jenkins_slave_node_name, Boolean make_snapshot_stages, String batch_size, Boolean dist_upgrade_nodes, Boolean upgrade_saltstack) {
         // Run openstack_deploy job on cfg01 Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -327,13 +327,14 @@
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 string(name: 'BATCH_SIZE', value: batch_size),
                 booleanParam(name: 'DIST_UPGRADE_NODES', value: dist_upgrade_nodes),
+                booleanParam(name: 'UPGRADE_SALTSTACK', value: upgrade_saltstack),
                 booleanParam(name: 'MAKE_SNAPSHOT_STAGES', value: make_snapshot_stages),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
         build_pipeline_job('swarm-deploy-cicd', parameters)
 }
 
-def swarm_deploy_platform(String stack_to_install, String install_timeout, String jenkins_slave_node_name, Boolean make_snapshot_stages, String batch_size, Boolean dist_upgrade_nodes) {
+def swarm_deploy_platform(String stack_to_install, String install_timeout, String jenkins_slave_node_name, Boolean make_snapshot_stages, String batch_size, Boolean dist_upgrade_nodes, Boolean upgrade_saltstack) {
         // Run openstack_deploy job on CICD Jenkins for specified stacks
         def common = new com.mirantis.mk.Common()
         def tcp_qa_refs = env.TCP_QA_REFS ?: ''
@@ -346,6 +347,7 @@
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 string(name: 'BATCH_SIZE', value: batch_size),
                 booleanParam(name: 'DIST_UPGRADE_NODES', value: dist_upgrade_nodes),
+                booleanParam(name: 'UPGRADE_SALTSTACK', value: upgrade_saltstack),
                 booleanParam(name: 'MAKE_SNAPSHOT_STAGES', value: make_snapshot_stages),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
             ]
@@ -384,10 +386,6 @@
                 string(name: 'PARENT_WORKSPACE', value: pwd()),
                 string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
-                string(name: 'LAB_CONFIG_NAME', value: "${LAB_CONFIG_NAME}"),
-                string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
-                string(name: 'MCP_IMAGE_PATH1604', value: "${MCP_IMAGE_PATH1604}"),
-                string(name: 'IMAGE_PATH_CFG01_DAY01', value: "${IMAGE_PATH_CFG01_DAY01}"),
                 string(name: 'TEMPEST_IMAGE_VERSION', value: "${tempest_image_version}"),
                 string(name: 'TEMPEST_TARGET', value: "${tempest_target}"),
                 string(name: 'TEMPEST_PATTERN', value: "${tempest_pattern}"),
@@ -521,7 +519,8 @@
                 \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
                 \\\"BATCH_SIZE\\\": \\\"\${BATCH_SIZE}\\\",
                 \\\"STACK_INSTALL\\\": \\\"${stack}\\\",
-                \\\"DIST_UPGRADE_NODES\\\": \\\"\${DIST_UPGRADE_NODES}\\\"
+                \\\"DIST_UPGRADE_NODES\\\": \\\"\${DIST_UPGRADE_NODES}\\\",
+                \\\"UPGRADE_SALTSTACK\\\": \\\"\${UPGRADE_SALTSTACK}\\\"
             }\"
             JOB_PREFIX="[ ${ENV_NAME}/{build_number}:drivetrain {time} ] "
             python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
@@ -556,7 +555,8 @@
                 \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
                 \\\"BATCH_SIZE\\\": \\\"\${BATCH_SIZE}\\\",
                 \\\"STACK_INSTALL\\\": \\\"${stack}\\\",
-                \\\"DIST_UPGRADE_NODES\\\": \\\"\${DIST_UPGRADE_NODES}\\\"
+                \\\"DIST_UPGRADE_NODES\\\": \\\"\${DIST_UPGRADE_NODES}\\\",
+                \\\"UPGRADE_SALTSTACK\\\": \\\"\${UPGRADE_SALTSTACK}\\\"
             }\"
             JOB_PREFIX="[ ${ENV_NAME}/{build_number}:platform {time} ] "
             python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 0ecef46..1f63ae9 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -283,6 +283,18 @@
         result = self.local(tgt=tgt, fun='cmd.run', args=cmd)
         return result['return']
 
+    def file_write(self, tgt, filename, content):
+        result = self.local(tgt=tgt,
+                            fun='file.write',
+                            args=[filename, content])
+        return result['return']
+
+    def file_makedirs(self, tgt, path):
+        if path[-1] != "/":
+            path += "/"
+        result = self.local(tgt=tgt, fun='file.makedirs', args=path)
+        return result['return']
+
     @utils.retry(10, exception=libpepper.PepperException)
     def sync_time(self, tgt='*'):
         LOG.info("NTP time sync on the salt minions '{0}'".format(tgt))
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index d3a0aa3..5e60b86 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -101,6 +101,7 @@
 STACK_INSTALL = os.environ.get('STACK_INSTALL', None)
 BATCH_SIZE = os.environ.get('BATCH_SIZE', None)
 DIST_UPGRADE_NODES = os.environ.get('DIST_UPGRADE_NODES', False)
+UPGRADE_SALTSTACK = os.environ.get('UPGRADE_SALTSTACK', False)
 SKIP_SYNC_TIME = get_var_as_bool("SKIP_SYNC_TIME", False)
 
 # OpenStack parameters to work with Heat stacks
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
index 218f04b..4b04572 100644
--- a/tcp_tests/templates/_heat_environments/eu-cloud.env
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -43,4 +43,4 @@
   net_public: public
 
   foundation_image: system.foundation
-  nameservers: 172.18.208.44
+  nameservers: 172.18.176.6,172.18.224.6
diff --git a/tcp_tests/templates/_heat_environments/us-cloud.env b/tcp_tests/templates/_heat_environments/us-cloud.env
index b5d0961..87b40ef 100644
--- a/tcp_tests/templates/_heat_environments/us-cloud.env
+++ b/tcp_tests/templates/_heat_environments/us-cloud.env
@@ -43,4 +43,4 @@
   net_public: public
 
   foundation_image: system.foundation
-  nameservers: 172.18.208.44
+  nameservers: 172.18.224.6,172.18.176.6
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 35aed20..9e111d7 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -28,7 +28,7 @@
   deploy_network_subnet: 172.16.164.0/26
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   gateway_primary_first_nic: eth1
   gateway_primary_second_nic: eth2
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
index 1dac2f8..a31051f 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
@@ -80,4 +80,5 @@
   skip_fail: false
 
 {{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
-{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/tempest_skip.list b/tcp_tests/templates/bm-cicd-pike-ovs-maas/tempest_skip.list
new file mode 100644
index 0000000..c3799f2
--- /dev/null
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/tempest_skip.list
@@ -0,0 +1,25 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index c4ec68a..730fc39 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -13,7 +13,7 @@
   openstack_baremetal_neutron_subnet_allocation_end: 10.14.0.200
   openstack_baremetal_address: 10.167.11.20
   openstack_baremetal_interface: ens7
-  openstack_baremetal_vip_interface: phy-baremetal
+  openstack_baremetal_vip_interface: br_baremetal
   jenkins_cfg_admin_password: r00tme
   bmk_enabled: 'False'
   cicd_control_node01_address: 10.167.11.91
@@ -43,7 +43,7 @@
   deploy_network_subnet: 172.16.164.0/26
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   gateway_primary_first_nic: eth1
   gateway_primary_second_nic: eth2
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
index 92c3aa4..9fc0dfe 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
@@ -80,4 +80,5 @@
   skip_fail: false
 
 {{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
-{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/tempest_skip.list b/tcp_tests/templates/bm-cicd-queens-ovs-maas/tempest_skip.list
new file mode 100644
index 0000000..c9c567a
--- /dev/null
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/tempest_skip.list
@@ -0,0 +1,30 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-33000 [OC][Infra] Instances don't have access to external net
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server\b
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_volume_shelved_or_offload_server\b
+# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume\b
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index 695e537..91a3858 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -53,7 +53,7 @@
    ############## TCP Cloud cfg01 node ##################
    - echo "Preparing base OS"
 
-   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   - echo "nameserver 172.18.224.6" > /etc/resolv.conf;
 
    # Ensure that the salt-master service is ready to receive requests
    - salt-key -y -D
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 75605b9..f45108a 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -92,8 +92,8 @@
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/26
   deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
   email_address: sgudz@mirantis.com
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
index f3167dc..46393c3 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
@@ -77,3 +77,4 @@
 
 {{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
 {{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/tempest_skip.list b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/tempest_skip.list
new file mode 100644
index 0000000..49728e9
--- /dev/null
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/tempest_skip.list
@@ -0,0 +1,23 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 2ca190b..d79011f 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -92,8 +92,8 @@
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/26
   deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
   email_address: sgudz@mirantis.com
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
index b6c24bf..8dbf61a 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
@@ -76,4 +76,5 @@
   skip_fail: false
 
 {{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
-{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.CLEAR_CEPH_OSD_DRIVES() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/tempest_skip.list b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/tempest_skip.list
new file mode 100644
index 0000000..8dfb654
--- /dev/null
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/tempest_skip.list
@@ -0,0 +1,47 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-25940 for contrail only
+tempest.api.network.admin.test_quotas.QuotasTest.test_quotas\b
+
+# PROD-33719 for contrail only
+tempest.api.network.admin.test_routers.RoutersAdminTest.test_update_router_set_gateway
+tempest.api.network.admin.test_routers.RoutersIpV6AdminTest.test_update_router_set_gateway
+
+# PROD-25128 [OC 4.x][Tempest] Parameter "strict_compliance" is False by default
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable
+
+# PROD-21671 [OpenContrail 4.0] Unable to update "subnet-id" for port (test_update_port_with_security_group_and_extra_attributes)
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+
+# PROD-31179 Several tempest tests are failed on contrail configuration on checks for floating ip connectivity
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state
+
+# PROD-25586 [OC4.x][Tempest] Heat can't update port's mac address
+heat_tempest_plugin.tests.functional.test_create_update_neutron_port.UpdatePortTest.test_update_with_mac_address
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index 8818e7b..c4b85a6 100644
--- a/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/heat-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -91,7 +91,7 @@
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: ddmitriev@mirantis.com
   etcd_ssl: 'True'
   stacklight_ssl_enabled: 'True'
diff --git a/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml b/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
index 46f40c0..f208038 100644
--- a/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
+++ b/tcp_tests/templates/heat-cicd-k8s-genie/cookiecutter-context-k8s-genie.yaml
@@ -33,7 +33,7 @@
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: ddmitriev@mirantis.com
   etcd_ssl: 'True'
   stacklight_ssl_enabled: 'True'
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
index 6453453..2677238 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
@@ -135,7 +135,7 @@
   deploy_network_subnet: 10.167.5.0/24
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   gainsight_service_enabled: 'False'
   gateway_primary_first_nic: eth1
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/tempest_skip.list b/tcp_tests/templates/heat-cicd-pike-dvr-sl/tempest_skip.list
new file mode 100644
index 0000000..9556a50
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/tempest_skip.list
@@ -0,0 +1,26 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (removed duplicate of test_server_signal_userdata_format_software_config, listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 9f1b14b..b2532f0 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -90,8 +90,8 @@
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
   deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
   email_address: sgudz@mirantis.com
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/tempest_skip.list b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/tempest_skip.list
new file mode 100644
index 0000000..4698a42
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/tempest_skip.list
@@ -0,0 +1,50 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (removed duplicate of test_server_signal_userdata_format_software_config, listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-25940 for contrail only
+tempest.api.network.admin.test_quotas.QuotasTest.test_quotas\b
+
+# PROD-33719 for contrail only
+tempest.api.network.admin.test_routers.RoutersAdminTest.test_update_router_set_gateway
+tempest.api.network.admin.test_routers.RoutersIpV6AdminTest.test_update_router_set_gateway
+
+# PROD-25128 [OC 4.x][Tempest] Parameter "strict_compliance" is False by default
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable
+
+# PROD-21671 [OpenContrail 4.0] Unable to update "subnet-id" for port (test_update_port_with_security_group_and_extra_attributes)
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+
+# PROD-31179 Several tempest tests are failed on contrail configuration on checks for floating ip connectivity
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state
+
+# PROD-25586 [OC4.x][Tempest] Heat can't update port's mac address
+heat_tempest_plugin.tests.functional.test_create_update_neutron_port.UpdatePortTest.test_update_with_mac_address
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index a4dbdba..683d502 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -135,7 +135,7 @@
   deploy_network_subnet: 10.167.5.0/24
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   gainsight_service_enabled: 'False'
   gateway_primary_first_nic: eth1
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
index 9af78d6..3b77cfd 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
@@ -6,6 +6,7 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
+{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
 
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -16,4 +17,5 @@
 
 {{SHARED.MACRO_IPFLUSH_TENANTS_IFACES()}}
 
-{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
\ No newline at end of file
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/tempest_skip.list b/tcp_tests/templates/heat-cicd-queens-dvr-sl/tempest_skip.list
new file mode 100644
index 0000000..a2c5935
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/tempest_skip.list
@@ -0,0 +1,27 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (removed duplicate of test_server_signal_userdata_format_software_config, listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 16d5e18..47e1c66 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -87,8 +87,8 @@
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
   deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
   email_address: sgudz@mirantis.com
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
@@ -251,15 +251,20 @@
   openldap_enabled: 'True'
   openldap_organisation: ${_param:cluster_name}
   ceph_enabled: 'True'
-  ceph_version: "luminous"
+  # Apply setting from contexts/ceph/nautilus-multiple-osd.yml
+  ceph_version: "nautilus"
   ceph_hyper_converged: "False"
   ceph_osd_backend: "bluestore"
+  ceph_osds_per_device: '1'
+  ceph_osd_data_size: ''
   ceph_osd_count: "3"
   ceph_osd_node_count: 3
   ceph_osd_block_db_size: 3
   ceph_osd_journal_size: 3
+  ceph_osd_dmcrypt: False
   ceph_osd_bond_mode: "active-backup"
   ceph_osd_data_partition_prefix: ""
+  ceph_osd_block_partition_prefix: ""
   ceph_public_network_allocation: storage
   ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
   ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
@@ -270,8 +275,8 @@
   ceph_osd_storage_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
   ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.201-==IPV4_NET_TENANT_PREFIX==.203"
 
-  ceph_osd_data_disks: "/dev/vdb"
-  ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+  ceph_osd_data_disks: "/dev/vdd"
+  ceph_osd_journal_or_block_db_disks: ""
   ceph_osd_mode: "separated"
   ceph_osd_deploy_nic: "eth0"
   ceph_osd_primary_first_nic: "eth1"
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
index 1aed1d8..710f01d 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
@@ -6,8 +6,6 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
-{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
-
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -16,7 +14,3 @@
 {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
-
-{{ SHARED_WORKAROUNDS.MACRO_SET_SALTAPI_TIMEOUT() }}
-
-
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/tempest_skip.list b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/tempest_skip.list
new file mode 100644
index 0000000..4698a42
--- /dev/null
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/tempest_skip.list
@@ -0,0 +1,50 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (removed duplicate of test_server_signal_userdata_format_software_config, listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-25940 for contrail only
+tempest.api.network.admin.test_quotas.QuotasTest.test_quotas\b
+
+# PROD-33719 for contrail only
+tempest.api.network.admin.test_routers.RoutersAdminTest.test_update_router_set_gateway
+tempest.api.network.admin.test_routers.RoutersIpV6AdminTest.test_update_router_set_gateway
+
+# PROD-25128 [OC 4.x][Tempest] Parameter "strict_compliance" is False by default
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable
+tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable
+
+# PROD-21671 [OpenContrail 4.0] Unable to update "subnet-id" for port (test_update_port_with_security_group_and_extra_attributes)
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes
+
+# PROD-31179 Several tempest tests are failed on contrail configuration on checks for floating ip connectivity
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_update_router_admin_state
+
+# PROD-25586 [OC4.x][Tempest] Heat can't update port's mac address
+heat_tempest_plugin.tests.functional.test_create_update_neutron_port.UpdatePortTest.test_update_with_mac_address
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
index e632f83..e8afd72 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
@@ -43,12 +43,15 @@
   ceph_mon_node03_address: 10.167.4.68
   ceph_mon_node03_hostname: cmn03
   ceph_osd_backend: bluestore
+  ceph_osds_per_device: '3'
   ceph_osd_block_db_size: '3'
+  ceph_osd_data_size: '14'
+  ceph_osd_dmcrypt: False
   ceph_osd_bond_mode: active-backup
   ceph_osd_data_partition_prefix: ""
   ceph_osd_count: '3'
-  ceph_osd_data_disks: "/dev/vdb"
-  ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+  ceph_osd_data_disks: "/dev/vdd"
+  ceph_osd_journal_or_block_db_disks: "/dev/vde"
   ceph_osd_node_count: '3'
   ceph_osd_journal_size: '3'
   ceph_osd_deploy_nic: "eth0"
@@ -126,7 +129,7 @@
   deploy_network_subnet: 10.167.5.0/24
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   gainsight_service_enabled: 'False'
   gateway_primary_first_nic: eth1
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
index 7408912..9e648aa 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
@@ -17,6 +17,4 @@
 {{ SHARED.MACRO_IPFLUSH_TENANTS_IFACES() }}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
-
-{{ SHARED_WORKAROUNDS.MACRO_SET_SALTAPI_TIMEOUT() }}
-{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
+{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/tempest_skip.list b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/tempest_skip.list
new file mode 100644
index 0000000..c3799f2
--- /dev/null
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/tempest_skip.list
@@ -0,0 +1,25 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (removed duplicate of test_server_signal_userdata_format_software_config, listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 1a1a0e8..f48bcb3 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -90,8 +90,8 @@
   deploy_network_netmask: 255.255.255.0
   deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
   deployment_type: physical
-  dns_server01: 172.18.208.44
-  dns_server02: 172.18.176.6
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   infra_bond_mode: active-backup
   infra_deploy_nic: eth0
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml
index 4a8b1f3..de5cfac 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml
@@ -6,7 +6,6 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
-{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -14,6 +13,4 @@
 
 {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
 
-{{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
-
-{{ SHARED_WORKAROUNDS.MACRO_SET_SALTAPI_TIMEOUT() }}
+{{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
new file mode 100644
index 0000000..c3799f2
--- /dev/null
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/tempest_skip.list
@@ -0,0 +1,25 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+# (removed duplicate of test_server_signal_userdata_format_software_config, listed above)
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index d729d43..c8ba8c6 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -1,3 +1,4 @@
+#Ceph Nautilus multiple osd
 default_context:
   jenkins_cfg_admin_password: r00tme
   auditd_enabled: 'False'
@@ -43,13 +44,17 @@
   ceph_mon_node02_hostname: cmn02
   ceph_mon_node03_address: 10.167.4.68
   ceph_mon_node03_hostname: cmn03
+  # Apply changes for ceph from contexts/ceph/nautilus-encrypted-devices.yml
   ceph_osd_backend: bluestore
+  ceph_osds_per_device: '3'
+  ceph_osd_data_size: '14'
+  ceph_osd_dmcrypt: True
   ceph_osd_block_db_size: '3'
   ceph_osd_data_partition_prefix: ""
   ceph_osd_bond_mode: active-backup
   ceph_osd_count: '3'
-  ceph_osd_data_disks: "/dev/vdb"
-  ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+  ceph_osd_data_disks: "/dev/vdd"
+  ceph_osd_journal_or_block_db_disks: "/dev/vde"
   ceph_osd_mode: "separated"
   ceph_osd_node_count: '3'
   ceph_osd_journal_size: '3'
@@ -70,7 +75,8 @@
   ceph_rgw_node02_hostname: rgw02
   ceph_rgw_node03_address: 10.167.4.78
   ceph_rgw_node03_hostname: rgw03
-  ceph_version: luminous
+  ceph_version: "nautilus"
+  ceph_osd_block_partition_prefix: ''
   cicd_control_node01_address: 10.167.4.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.4.92
@@ -127,7 +133,7 @@
   deploy_network_subnet: 10.167.5.0/24
   deployment_type: physical
   dns_server01: 172.18.176.6
-  dns_server02: 172.18.208.44
+  dns_server02: 172.18.224.6
   email_address: test@mirantis.com
   gainsight_service_enabled: 'False'
   gateway_primary_first_nic: eth1
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml
index 76505ed..d4eeaac 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml
@@ -6,8 +6,6 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
-{% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
-
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
@@ -17,5 +15,3 @@
 {{ SHARED.MACRO_IPFLUSH_TENANTS_IFACES() }}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
-
-{{ SHARED_WORKAROUNDS.MACRO_SET_SALTAPI_TIMEOUT() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/tempest_skip.list b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/tempest_skip.list
new file mode 100644
index 0000000..239b946
--- /dev/null
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/tempest_skip.list
@@ -0,0 +1,31 @@
+# Globally disabled inside the 'ci-tempest' docker image
+tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+tempest.api.object_storage.test_healthcheck.HealthcheckTest.test_get_healthcheck
+tempest.api.object_storage.test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization
+tempest.api.object_storage.test_crossdomain.CrossdomainTest.test_get_crossdomain_policy
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_value_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_name_exceeds_max_length
+tempest.api.object_storage.test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_exceeds_overall_metadata_count
+tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+tempest.api.object_storage.test_account_services_negative.AccountNegativeTest.test_list_containers_with_non_authorized_user
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota\b
+tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object\b
+tempest.api.object_storage.test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota
+
+# PROD-22111 Need to align integration CI labs configuration to pass Tempest tests with WaitCondition
+heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition.test_create_stack_with_multi_signal_waitcondition
+heat_tempest_plugin.tests.scenario.test_server_cfn_init.CfnInitIntegrationTest.test_server_cfn_init\b
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw
+heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config
+
+
+heat_tempest_plugin.tests.scenario.test_autoscaling_lbv2.AutoscalingLoadBalancerv2Test.test_autoscaling_loadbalancer_neutron
+
+# PROD-29650 failed with PortNotFound
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip
+
+# PROD-33000 [OC][Infra] Instances don't have access to external net
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_detach_volume_shelved_or_offload_server\b
+# tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_volume_shelved_or_offload_server\b
+# tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_suspend_resume\b
+
diff --git a/tcp_tests/templates/shared-workarounds.yaml b/tcp_tests/templates/shared-workarounds.yaml
index 396153c..5e508fd 100644
--- a/tcp_tests/templates/shared-workarounds.yaml
+++ b/tcp_tests/templates/shared-workarounds.yaml
@@ -1,19 +1,3 @@
-
-{%- macro MACRO_SET_SALTAPI_TIMEOUT() %}
-{######################################}
-- description: |
-    Set nginx_proxy_salt_api to 1800. RelatedProd:PROD-34798
-    #TODO Remove after released 2019.2.8
-  cmd: |
-    set -e;
-    set -x;
-    . /root/venv-reclass-tools/bin/activate;
-    reclass-tools  add-key  parameters.nginx.server.site.nginx_proxy_salt_api.proxy.timeout 1800 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-{%- endmacro %}
-
 {%- macro MACRO_CEPH_SET_PGNUM() %}
 {######################################}
 - description: |
@@ -25,7 +9,24 @@
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools  add-key parameters.ceph.setup.pool.gnocchi.pg_num 32 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/ceph/setup.yml
     reclass-tools  add-key parameters.ceph.setup.pool.gnocchi.pgp_num 32 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/ceph/setup.yml
+    cd /srv/salt/reclass
+    git add /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/ceph/setup.yml
+    git commit -m "[from tcp-qa/shared_workarounds.yaml] Set greater pg_num/pgp_num for gnocchi pool in Ceph. RelatedProd:PROD-34833"
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
+{%- endmacro %}
+
+{%- macro CLEAR_CEPH_OSD_DRIVES() %}
+{######################################}
+- description: |
+    Clear Ceph OSD drives before deploying Ceph. RelatedProd:PROD-35141
+
+  cmd: |
+    set -e;
+    set -x;
+    salt -C 'I@ceph:osd' cmd.run 'dd if=/dev/zero of=/dev/sdb bs=1M count=50 conv=fsync ; dd if=/dev/zero of=/dev/sdc bs=1M count=50 conv=fsync'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
 {%- endmacro %}
\ No newline at end of file
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
index e55fc08..3d5fbc7 100644
--- a/tcp_tests/tests/system/test_cvp_pipelines.py
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -99,6 +99,72 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.run_cvp_tempest
+    def test_run_cvp_tempest(
+            self,
+            salt_actions,
+            show_step,
+            drivetrain_actions,
+            tempest_actions, _):
+        """Runner for Pipeline CVP - Tempest tests
+
+        Scenario:
+            1. Sync time on the environment nodes
+            2. Execute pre-requisites for Tempest
+            3. Run cvp-tempest Jenkins job and get results
+            4. Download Tempest xml report from Jenkins node to foundation
+                node
+
+        """
+        salt = salt_actions
+        dt = drivetrain_actions
+        jenkins_start_timeout = 60
+        jenkins_build_timeout = 6 * 60 * 60
+        cluster_name = settings.ENV_NAME
+        skiplist_file = "tcp_tests/templates/{}/tempest_skip.list".\
+            format(cluster_name)
+
+        show_step(1)
+        salt.sync_time()
+
+        show_step(2)
+        tempest_actions.prepare(pipeline=True)
+        # Copy skip list to cfg node
+        skiplist = open(skiplist_file, 'r').read() or ""
+        salt.file_makedirs(tgt="I@salt:master", path="/usr/share/tempest")
+        salt.file_write(tgt="I@salt:master",
+                        filename="/usr/share/tempest/skip.list",
+                        content=skiplist)
+
+        show_step(3)
+        job_name = 'cvp-tempest'
+        job_parameters = {
+            'EXTRA_PARAMS': """
+---
+  DEBUG_MODE: false
+  GENERATE_CONFIG: true
+  SKIP_LIST_PATH: /usr/share/tempest/skip.list
+  report_prefix: "cvp_"
+            """,
+            'TEMPEST_ENDPOINT_TYPE': 'internalURL',
+            'TEMPEST_TEST_PATTERN': 'set=full',
+        }
+        cvp_tempest_result = dt.start_job_on_jenkins(
+            job_name,
+            jenkins_tgt='I@docker:client:stack:jenkins and I@salt:master',
+            start_timeout=jenkins_start_timeout,
+            build_timeout=jenkins_build_timeout,
+            verbose=True,
+            job_parameters=job_parameters,
+            job_output_prefix='[ {job_name}/{build_number}:platform {time} ] ')
+        LOG.info('Job {0} result: {1}'.format(job_name, cvp_tempest_result))
+
+        show_step(4)
+        tempest_actions.fetch_arficats(
+            username='root', report_dir="/root/test/")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_cvp_func_sanity
     def test_run_cvp_func_sanity(self, salt_actions, show_step, _):
         """Runner for Pipeline CVP - Functional tests
@@ -352,53 +418,3 @@
         except jenkins.NotFoundException:
             raise jenkins.NotFoundException("{0}\n{1}".format(
                 description, '\n'.join(stages)))
-
-    @pytest.mark.grab_versions
-    @pytest.mark.parametrize("_", [settings.ENV_NAME])
-    @pytest.mark.run_cvp_tempest
-    def test_run_cvp_tempest(
-            self,
-            salt_actions,
-            show_step,
-            drivetrain_actions,
-            tempest_actions, _):
-        """Runner for Pipeline CVP - Tempest tests
-
-        Scenario:
-            1. Sync time on the environment nodes
-            2. Execute pre-requites for Tempest
-            3. Run cvp-tempest Jenkins job and get results
-            4. Download Tempest xml report from Jenkins node to foundation
-                node
-
-        """
-        salt = salt_actions
-        dt = drivetrain_actions
-        jenkins_start_timeout = 60
-        jenkins_build_timeout = 3 * 60 * 60
-
-        show_step(1)
-        salt.sync_time()
-
-        show_step(2)
-        tempest_actions.prepare(pipeline=True)
-
-        show_step(3)
-        job_name = 'cvp-tempest'
-        job_parameters = {
-            'TEMPEST_ENDPOINT_TYPE': 'internalURL',
-            'TEMPEST_TEST_PATTERN': 'set=full',
-        }
-        cvp_tempest_result = dt.start_job_on_jenkins(
-            job_name,
-            jenkins_tgt='I@docker:client:stack:jenkins and cfg01*',
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=True,
-            job_parameters=job_parameters,
-            job_output_prefix='[ cvp-func/{build_number}:platform {time} ] ')
-        LOG.info('Job {0} result: {1}'.format(job_name, cvp_tempest_result))
-
-        show_step(4)
-        tempest_actions.fetch_arficats(
-            username='root', report_dir="/root/test/")
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 00feaef..eb64fb2 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -545,6 +545,7 @@
         job_parameters = {
             "TARGET_SERVERS": target,
             "OS_DIST_UPGRADE": True,
+            "UPGRADE_SALTSTACK": False,
             "OS_UPGRADE": True,
             "INTERACTIVE": False}
         upgrade_control_pipeline = drivetrain_actions.start_job_on_jenkins(
diff --git a/tcp_tests/tests/system/test_upgrade_pike_queens.py b/tcp_tests/tests/system/test_upgrade_pike_queens.py
index c4c9119..3ea2d23 100644
--- a/tcp_tests/tests/system/test_upgrade_pike_queens.py
+++ b/tcp_tests/tests/system/test_upgrade_pike_queens.py
@@ -194,7 +194,8 @@
         job_name = 'deploy-upgrade-control'
         job_parameters = {
             'INTERACTIVE': False,
-            'OS_DIST_UPGRADE': True,
+            'OS_DIST_UPGRADE': False,
+            'UPGRADE_SALTSTACK': False,
             'OS_UPGRADE': True
         }
         # ####### Run job for ctl* ###