Merge "Fix xpath for cvp-spt plots"
diff --git a/build-debian-packages-pipeline.groovy b/build-debian-packages-pipeline.groovy
index 5d16339..864f0bc 100644
--- a/build-debian-packages-pipeline.groovy
+++ b/build-debian-packages-pipeline.groovy
@@ -64,12 +64,16 @@
           checkout changelog: true, poll: false,
             scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
             extensions: extensions,  submoduleCfg: [], userRemoteConfigs: userRemoteConfigs]
+
+          /* There are 2 schemas of build spec keeping:
+                 1. Separate directory with specs.
+                 2. Separate branch with build specs. I.e. debian/xenial
+             Logic below makes package build compatible with both schemas.
+          */
+          if (fileExists('debian/changelog')) {
+              debian_branch = null
+          }
           if (debian_branch){
-            /* There are 2 schemas of build spec keeping:
-                   1. Separate branch with build specs. I.e. debian/xenial
-                   2. Separate directory with specs.
-               Logic below makes package build compatible with both schemas.
-            */
             def retStatus = sh(script: 'git checkout ' + DEBIAN_BRANCH, returnStatus: true)
             if (retStatus != 0) {
               common.warningMsg("Cannot checkout ${DEBIAN_BRANCH} branch. Going to build package by ${SOURCE_BRANCH} branch.")
diff --git a/build-mirror-image.groovy b/build-mirror-image.groovy
deleted file mode 100644
index 4c42b3e..0000000
--- a/build-mirror-image.groovy
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- *
- * Build mirror image pipeline
- *
- * Expected parameters:
- * IMAGE_NAME - Name of the result image.
- * OS_CREDENTIALS_ID - ID of credentials for OpenStack API stored in Jenkins.
- * OS_PROJECT - Project in OpenStack under the VM will be spawned.
- * OS_URL - Keystone auth endpoint of the OpenStack.
- * OS_VERSION - OpenStack version
- * UPLOAD_URL - URL of an WebDAV used to upload the image after creating.
- * VM_AVAILABILITY_ZONE - Availability zone in OpenStack in the VM will be spawned.
- * VM_FLAVOR - Flavor to be used for VM in OpenStack.
- * VM_FLOATING_IP_POOL - Floating IP pool to be used to assign floating IP to the VM.
- * VM_IMAGE - Name of the image to be used for VM in OpenStack.
- * VM_IP - Static IP that is assigned to the VM which belongs to the network used.
- * VM_NETWORK_ID - ID of the network that VM connects to.
- * EXTRA_VARIABLES - list of key:value variables required by template.json
- *
- */
-
-// Load shared libs
-def common = new com.mirantis.mk.Common()
-def openstack = new com.mirantis.mk.Openstack()
-def git = new com.mirantis.mk.Git()
-def date = new Date()
-def dateTime = date.format("ddMMyyyy-HHmmss")
-def rcFile = ""
-def openstackEnv = ""
-def uploadImageStatus = ""
-def uploadMd5Status = ""
-def creds
-ArrayList extra_vars = EXTRA_VARIABLES.readLines()
-IMAGE_NAME = IMAGE_NAME + "-" + dateTime
-
-timeout(time: 8, unit: 'HOURS') {
-  node("python&&disk-xl") {
-    try {
-      def workspace = common.getWorkspace()
-      openstackEnv = "${workspace}/venv"
-
-      stage("Prepare env") {
-        if (!fileExists("${workspace}/tmp")) {
-          sh "mkdir -p ${workspace}/tmp"
-        }
-        if (!fileExists("${workspace}/images")) {
-          sh "mkdir ${workspace}/images"
-        }
-        if (!fileExists("bin")) {
-          common.infoMsg("Downloading packer")
-          sh "mkdir -p bin"
-          dir("bin") {
-            sh "wget --quiet -O ${PACKER_ZIP} ${PACKER_URL}"
-            sh "echo \"${PACKER_ZIP_MD5} ${PACKER_ZIP}\" >> md5sum"
-            sh "md5sum -c --status md5sum"
-            sh "unzip ${PACKER_ZIP}"
-          }
-        }
-        // clean images dir before building
-        sh(script: "rm -rf ${BUILD_OS}/images/*", returnStatus: true)
-        // clean virtualenv is exists
-        sh(script: "rm -rf ${workspace}/venv", returnStatus: true)
-
-        openstack.setupOpenstackVirtualenv(openstackEnv, OS_VERSION)
-        git.checkoutGitRepository(PACKER_TEMPLATES_REPO_NAME, PACKER_TEMPLATES_REPO_URL, PACKER_TEMPLATES_BRANCH)
-        creds = common.getPasswordCredentials(OS_CREDENTIALS_ID)
-      }
-
-      stage("Build Instance") {
-        dir("${workspace}/${PACKER_TEMPLATES_REPO_NAME}/${BUILD_OS}/") {
-          withEnv(extra_vars + ["PATH=${env.PATH}:${workspace}/bin",
-                                "PACKER_LOG_PATH=${workspace}/packer.log",
-                                "PACKER_LOG=1",
-                                "TMPDIR=${workspace}/tmp",
-                                "IMAGE_NAME=${IMAGE_NAME}",
-                                "OS_USERNAME=${creds.username}",
-                                "OS_PASSWORD=${creds.password.toString()}"]) {
-            if (PACKER_DEBUG.toBoolean()) {
-              PACKER_ARGS = "${PACKER_ARGS} -debug"
-            }
-
-            sh "packer build -only=${BUILD_ONLY} ${PACKER_ARGS} -parallel=false template.json"
-
-            def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${PACKER_LOG_PATH}", returnStatus: true)
-            // grep returns 0 if find something
-            if (packerStatus != 0) {
-              common.infoMsg("Openstack instance complete")
-            } else {
-              throw new Exception("Packer build failed")
-            }
-          }
-        }
-      }
-
-      stage("Publish image") {
-        common.infoMsg("Saving image ${IMAGE_NAME}")
-        rcFile = openstack.createOpenstackEnv(workspace, OS_URL, OS_CREDENTIALS_ID, OS_PROJECT, "default", "", "default", "2", "")
-
-        common.retry(3, 5) {
-          openstack.runOpenstackCommand("openstack image save --file ${IMAGE_NAME}.qcow2 ${IMAGE_NAME}", rcFile, openstackEnv)
-        }
-        sh "md5sum ${IMAGE_NAME}.qcow2 > ${IMAGE_NAME}.qcow2.md5"
-
-        common.infoMsg("Uploading image ${IMAGE_NAME}")
-        common.retry(3, 5) {
-          uploadImageStatus = sh(script: "curl -f -T ${IMAGE_NAME}.qcow2 ${UPLOAD_URL}", returnStatus: true)
-          if (uploadImageStatus != 0) {
-            throw new Exception("Image upload failed")
-          }
-        }
-
-        common.retry(3, 5) {
-          uploadMd5Status = sh(script: "curl -f -T ${IMAGE_NAME}.qcow2.md5 ${UPLOAD_URL}", returnStatus: true)
-          if (uploadMd5Status != 0) {
-            throw new Exception("MD5 sum upload failed")
-          }
-        }
-        currentBuild.description = "<a href='http://ci.mcp.mirantis.net:8085/images/${IMAGE_NAME}.qcow2'>${IMAGE_NAME}.qcow2</a>"
-      }
-
-    } catch (Throwable e) {
-      // If there was an error or exception thrown, the build failed
-      currentBuild.result = "FAILURE"
-      throw e
-    } finally {
-      if (CLEANUP_AFTER) {
-          dir(workspace) {
-            sh "rm -rf ./*"
-          }
-      } else {
-        common.infoMsg("Env has not been cleanup!")
-        common.infoMsg("Packer private key:")
-        dir("${workspace}/${PACKER_TEMPLATES_REPO_NAME}/${BUILD_OS}/") {
-          sh "cat os_${BUILD_OS}.pem"
-        }
-      }
-    }
-  }
-}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index a541fe0..bf7e238 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -355,6 +355,10 @@
               }
             }
 
+            stage('Install Orchestrated Apps'){
+                orchestrate.OrchestrateApplications(venvPepper, "I@salt:master ${extra_tgt}", "orchestration.deploy.applications")
+            }
+
             // install k8s
             if (common.checkContains('STACK_INSTALL', 'k8s')) {
 
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index ebbfc86..07a80e7 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -42,7 +42,7 @@
                     imagePath = imageArray[0]
                     if (imagePath.contains('SUBS_SOURCE_IMAGE_TAG')) {
                         common.warningMsg("Replacing SUBS_SOURCE_IMAGE_TAG => ${SOURCE_IMAGE_TAG}")
-                        imagePath.replace('SUBS_SOURCE_IMAGE_TAG', SOURCE_IMAGE_TAG)
+                        imagePath = imagePath.replace('SUBS_SOURCE_IMAGE_TAG', SOURCE_IMAGE_TAG)
                     }
                     targetRegistry = imageArray[1]
                     imageName = getImageName(imagePath)
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 12dc88d..553029e 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -17,9 +17,19 @@
 if (common.validInputParam('RECLASS_VERSION')) {
   reclassVersion = RECLASS_VERSION
 }
+slaveNode = (env.SLAVE_NODE ?: 'python&&docker')
 
-timeout(time: 12, unit: 'HOURS') {
-  node("python&&docker") {
+// Install extra formulas required only for rendering cfg01. All others should be fetched automatically via
+// the salt.master.env state during salt-master bootstrap.
+// TODO: Ideally, this data should be fetched from CC, per env/context - e.g. by processing the _enabled
+// options from CC contexts.
+// For now, just mix them together in one set.
+def testCfg01ExtraFormulas = 'glusterfs jenkins logrotate maas ntp rsyslog fluentd telegraf prometheus ' +
+                             'grafana backupninja auditd'
+
+
+timeout(time: 2, unit: 'HOURS') {
+  node(slaveNode) {
     def templateEnv = "${env.WORKSPACE}/template"
     def modelEnv = "${env.WORKSPACE}/model"
     def testEnv = "${env.WORKSPACE}/test"
@@ -43,6 +53,7 @@
       def templateDir = "${templateEnv}/template/dir"
       def templateOutputDir = templateBaseDir
       def user
+      def testResult = false
       wrap([$class: 'BuildUser']) {
         user = env.BUILD_USER_ID
       }
@@ -50,7 +61,8 @@
       currentBuild.description = clusterName
       print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
 
-      stage ('Download Cookiecutter template') {
+      stage('Download Cookiecutter template') {
+        sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
         def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
         def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
         git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
@@ -64,7 +76,7 @@
           if (cookiecutterTemplateBranch == '') {
             cookiecutterTemplateBranch = mcpVersion
             // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
-            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+            if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
               cookiecutterTemplateBranch = 'master'
             }
           }
@@ -72,7 +84,7 @@
         }
       }
 
-      stage ('Create empty reclass model') {
+      stage('Create empty reclass model') {
         dir(path: modelEnv) {
           sh "rm -rfv .git"
           sh "git init"
@@ -89,8 +101,9 @@
           // Use mcpVersion git tag if not specified branch for reclass-system
           if (sharedReclassBranch == '') {
             sharedReclassBranch = mcpVersion
-            // Don't have nightly/testing/stable for reclass-system repo, therefore use master
-            if(mcpVersion == "nightly" || mcpVersion == "testing" || mcpVersion == "stable"){
+            // Don't have nightly/testing/stable for reclass-system repo, therefore use master
+            if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
+              common.warningMsg("Fetching reclass-system from master!")
               sharedReclassBranch = 'master'
             }
           }
@@ -142,11 +155,11 @@
         }
       }
 
-      if(localRepositories && !offlineDeployment){
+      if (localRepositories && !offlineDeployment) {
         def aptlyModelUrl = templateContext.default_context.local_model_url
         dir(path: modelEnv) {
           ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
-          if(!(mcpVersion in ["nightly", "testing", "stable"])){
+          if (!(mcpVersion in ["nightly", "testing", "stable"])) {
             ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
           }
         }
@@ -173,16 +186,16 @@
 
       stage("Test") {
         if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
-          def testResult = false
           sh("cp -r ${modelEnv} ${testEnv}")
           def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+          common.infoMsg("Attempt to run test against formula-version: ${mcpVersion}")
           testResult = saltModelTesting.setupAndTestNode(
               "${saltMaster}.${clusterDomain}",
               "",
-              "",
+              testCfg01ExtraFormulas,
               testEnv,
               'pkg',
-              'stable',
+              mcpVersion,
               reclassVersion,
               0,
               false,
@@ -193,8 +206,7 @@
           if (testResult) {
             common.infoMsg("Test finished: SUCCESS")
           } else {
-            common.infoMsg('Test finished: FAILURE')
-            throw new RuntimeException('Test stage finished: FAILURE')
+            common.warningMsg('Test finished: FAILURE')
           }
         } else {
           common.warningMsg("Test stage has been skipped!")
@@ -208,16 +220,18 @@
         def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
         if (mcpCommonScriptsBranch == '') {
           mcpCommonScriptsBranch = mcpVersion
-          // Don't have nightly for mcp-common-scripts repo, therefore use master
-          if(mcpVersion == "nightly"){
+          // Don't have n/t/s for mcp-common-scripts repo, therefore use master
+          if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
+            common.warningMsg("Fetching mcp-common-scripts from master!")
             mcpCommonScriptsBranch = 'master'
           }
         }
         def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
         def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
-
-        sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-        sh "wget -O user_data.sh ${user_data_script_url}"
+        common.retry(3, 5) {
+          sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
+          sh "wget -O user_data.sh ${user_data_script_url}"
+        }
 
         sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
         sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
@@ -231,7 +245,7 @@
         smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
         smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
         smc['MCP_VERSION'] = "${mcpVersion}"
-        if (templateContext['default_context']['local_repositories'] == 'True'){
+        if (templateContext['default_context']['local_repositories'] == 'True') {
           def localRepoIP = templateContext['default_context']['local_repo_url']
           smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
           smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
@@ -239,8 +253,8 @@
           smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
           smc['LOCAL_REPOS'] = 'true'
         }
-        if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
-          if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
+        if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
+          if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
             smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
             smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
           } else {
@@ -260,7 +274,7 @@
         // save cfg iso to artifacts
         archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
 
-        if (templateContext['default_context']['local_repositories'] == 'True'){
+        if (templateContext['default_context']['local_repositories'] == 'True') {
           def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
           def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
           sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
@@ -284,8 +298,8 @@
         }
       }
 
-      stage ('Save changes reclass model') {
-        sh(returnStatus: true, script: "tar -zcf output-${clusterName}/${clusterName}.tar.gz -C ${modelEnv} .")
+      stage('Save changes reclass model') {
+        sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
         archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
 
 
@@ -295,21 +309,24 @@
               body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
               subject: "Your Salt model ${clusterName}")
         }
-        dir("output-${clusterName}"){
+        dir("output-${clusterName}") {
           deleteDir()
         }
       }
 
+      // Fail the build, but still leave the failed artifacts available for inspection
+      if (!testResult && TEST_MODEL.toBoolean()) {
+        common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
+        error('Test stage finished: FAILURE')
+      }
+
     } catch (Throwable e) {
-      // If there was an error or exception thrown, the build failed
       currentBuild.result = "FAILURE"
       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
       throw e
     } finally {
-      stage ('Clean workspace directories') {
-        sh(returnStatus: true, script: "rm -rf ${templateEnv}")
-        sh(returnStatus: true, script: "rm -rf ${modelEnv}")
-        sh(returnStatus: true, script: "rm -rf ${pipelineEnv}")
+      stage('Clean workspace directories') {
+        sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
       }
       // common.sendNotification(currentBuild.result,"",["slack"])
     }
diff --git a/tag-git-repos.groovy b/tag-git-repos.groovy
index dabbb7f..312ec9e 100644
--- a/tag-git-repos.groovy
+++ b/tag-git-repos.groovy
@@ -44,7 +44,7 @@
         repoCommit = repoArray[2]
         if (repoCommit.contains('SUBS_SOURCE_REF')) {
           common.warningMsg("Replacing SUBS_SOURCE_REF => ${SOURCE_TAG}")
-          repoCommit.replace('SUBS_SOURCE_REF', SOURCE_TAG
+          repoCommit = repoCommit.replace('SUBS_SOURCE_REF', SOURCE_TAG
             )
         }
         gitRepoAddTag(repoUrl, repoName, TAG, GIT_CREDENTIALS, repoCommit)
diff --git a/test-cookiecutter-reclass-chunk.groovy b/test-cookiecutter-reclass-chunk.groovy
new file mode 100644
index 0000000..12428ba
--- /dev/null
+++ b/test-cookiecutter-reclass-chunk.groovy
@@ -0,0 +1,23 @@
+package com.mirantis.mk
+def common = new com.mirantis.mk.Common()
+def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
+
+/**
+ * Test CC model wrapper
+ *  EXTRA_VARIABLES_YAML: yaml based string, to be directly passed into testCCModel
+ */
+
+timeout(time: 1, unit: 'HOURS') {
+node() {
+  try {
+    extra_vars = readYaml text: EXTRA_VARIABLES_YAML
+    currentBuild.description = extra_vars.modelFile
+    saltModelTesting.testCCModel(extra_vars)
+    } catch (Throwable e) {
+          // If there was an error or exception thrown, the build failed
+          currentBuild.result = "FAILURE"
+          currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+          throw e
+        }
+      }
+    }
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 66961f8..2a64990 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -4,14 +4,15 @@
 python = new com.mirantis.mk.Python()
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
 
+slave_node = 'python&&docker'
 def reclassVersion = 'v1.5.4'
 if (common.validInputParam('RECLASS_VERSION')) {
   reclassVersion = RECLASS_VERSION
 }
 
 def generateSaltMaster(modEnv, clusterDomain, clusterName) {
-    def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
-    def nodeString = """classes:
+  def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
+  def nodeString = """classes:
 - cluster.${clusterName}.infra.config
 parameters:
     _param:
@@ -22,193 +23,226 @@
             name: cfg01
             domain: ${clusterDomain}
 """
-    sh "mkdir -p ${modEnv}/nodes/"
-    println "Create file ${nodeFile}"
-    writeFile(file: nodeFile, text: nodeString)
+  sh "mkdir -p ${modEnv}/nodes/"
+  println "Create file ${nodeFile}"
+  writeFile(file: nodeFile, text: nodeString)
+}
+
+def GetBaseName(line, remove_ext) {
+ filename = line.toString().split('/').last()
+ if (remove_ext && filename.endsWith(remove_ext.toString())) {
+   filename = filename.take(filename.lastIndexOf(remove_ext.toString()))
+ }
+ return filename
 }
 
 def generateModel(modelFile, cutterEnv) {
-    def templateEnv = "${env.WORKSPACE}"
-    def modelEnv = "${env.WORKSPACE}/model"
-    def basename = sh(script: "basename ${modelFile} .yml", returnStdout: true).trim()
-    def generatedModel = "${modelEnv}/${basename}"
-    def testEnv = "${env.WORKSPACE}/test"
-    def content = readFile(file: "${templateEnv}/contexts/${modelFile}")
-    def templateContext = readYaml text: content
-    def clusterDomain = templateContext.default_context.cluster_domain
-    def clusterName = templateContext.default_context.cluster_name
-    def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
-    def targetBranch = "feature/${clusterName}"
-    def templateBaseDir = "${env.WORKSPACE}"
-    def templateDir = "${templateEnv}/dir"
-    def templateOutputDir = templateBaseDir
-    sh "rm -rf ${generatedModel} || true"
+  def templateEnv = "${env.WORKSPACE}"
+  def modelEnv = "${env.WORKSPACE}/model"
+  def basename = GetBaseName(modelFile, '.yml')
+  def generatedModel = "${modelEnv}/${basename}"
+  def testEnv = "${env.WORKSPACE}/test"
+  def content = readFile(file: "${templateEnv}/contexts/${modelFile}")
+  def templateContext = readYaml text: content
+  def clusterDomain = templateContext.default_context.cluster_domain
+  def clusterName = templateContext.default_context.cluster_name
+  def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
+  def targetBranch = "feature/${clusterName}"
+  def templateBaseDir = "${env.WORKSPACE}"
+  def templateDir = "${templateEnv}/dir"
+  def templateOutputDir = templateBaseDir
+  sh(script: "rm -rf ${generatedModel} || true")
 
-    common.infoMsg("Generating model from context ${modelFile}")
+  common.infoMsg("Generating model from context ${modelFile}")
 
-    def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-    for (product in productList) {
+  def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
+  for (product in productList) {
 
-        // get templateOutputDir and productDir
-        if (product.startsWith("stacklight")) {
-            templateOutputDir = "${env.WORKSPACE}/output/stacklight"
-            try {
-                productDir = "stacklight" + templateContext.default_context['stacklight_version']
-            } catch (Throwable e) {
-                productDir = "stacklight1"
-            }
-        } else {
-            templateOutputDir = "${env.WORKSPACE}/output/${product}"
-            productDir = product
-        }
-
-        if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-            && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
-            templateDir = "${templateEnv}/cluster_product/${productDir}"
-            common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
-            sh "rm -rf ${templateOutputDir} || true"
-            sh "mkdir -p ${templateOutputDir}"
-            sh "mkdir -p ${outputDestination}"
-
-            python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, cutterEnv, templateBaseDir)
-            sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-        } else {
-            common.warningMsg("Product " + product + " is disabled")
-        }
+    // get templateOutputDir and productDir
+    if (product.startsWith("stacklight")) {
+      templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+      try {
+        productDir = "stacklight" + templateContext.default_context['stacklight_version']
+      } catch (Throwable e) {
+        productDir = "stacklight1"
+      }
+    } else {
+      templateOutputDir = "${env.WORKSPACE}/output/${product}"
+      productDir = product
     }
-    generateSaltMaster(generatedModel, clusterDomain, clusterName)
+
+    if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+        && templateContext.default_context["${product}_enabled"].toBoolean())) {
+
+      templateDir = "${templateEnv}/cluster_product/${productDir}"
+      common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+
+      sh "rm -rf ${templateOutputDir} || true"
+      sh "mkdir -p ${templateOutputDir}"
+      sh "mkdir -p ${outputDestination}"
+
+      python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, cutterEnv, templateBaseDir)
+      sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+    } else {
+      common.warningMsg("Product " + product + " is disabled")
+    }
+  }
+  generateSaltMaster(generatedModel, clusterDomain, clusterName)
 }
 
+
 def testModel(modelFile, testEnv, reclassVersion='v1.5.4') {
-  def templateEnv = "${env.WORKSPACE}"
-  def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
-  def templateContext = readYaml text: content
-  def clusterName = templateContext.default_context.cluster_name
-  def clusterDomain = templateContext.default_context.cluster_domain
-  if (SYSTEM_GIT_URL == "") {
-    git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
-  } else {
-    dir("${testEnv}/classes/system") {
-      if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
-        common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
-      }
-    }
-  }
+  // modelFile - `modelfilename` from model/modelfilename/modelfilename.yaml
+  // testEnv - path for model (model/modelfilename/)
+  // Grab all models and send each for checking in parallel - one per thread.
 
-  def testResult = false
-  def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
-  testResult = saltModelTesting.setupAndTestNode(
-      "cfg01.${clusterDomain}",
-      clusterName,
-      EXTRA_FORMULAS,
-      testEnv,
-      'pkg',
-      DISTRIB_REVISION,
-      reclassVersion,
-      0,
-      false,
-      false,
-      '',
-      '',
-      DockerCName)
-  if (testResult) {
-    common.infoMsg("testModel finished: SUCCESS")
-  } else {
-    error('testModel finished: FAILURE')
-    throw new RuntimeException('Test stage finished: FAILURE')
-  }
-
+  _values_string =  """
+  ---
+  MODELS_TARGZ: "${env.BUILD_URL}/artifact/reclass.tar.gz"
+  DockerCName: "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}"
+  testReclassEnv: "model/${modelFile}/"
+  modelFile: "contexts/${modelFile}.yml"
+  DISTRIB_REVISION: "${DISTRIB_REVISION}"
+  EXTRA_FORMULAS: "${env.EXTRA_FORMULAS}"
+  reclassVersion: "${reclassVersion}"
+  """
+  build job: "test-mk-cookiecutter-templates-chunk", parameters: [
+  [$class: 'StringParameterValue', name: 'EXTRA_VARIABLES_YAML', value: _values_string.stripIndent() ],
+  ]
 }
 
 def gerritRef
 try {
   gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
-  gerritRef = null
-}
-timeout(time: 12, unit: 'HOURS') {
-    node("python&&docker") {
-        def templateEnv = "${env.WORKSPACE}"
-        def cutterEnv = "${env.WORKSPACE}/cutter"
-        def jinjaEnv = "${env.WORKSPACE}/jinja"
+  } catch (MissingPropertyException e) {
+    gerritRef = null
+  }
 
-        try {
-            stage("Cleanup") {
-                sh("rm -rf * || true")
-            }
-
-            stage ('Download Cookiecutter template') {
-                if (gerritRef) {
-                    def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
-                    merged = gerritChange.status == "MERGED"
-                    if(!merged){
-                        checkouted = gerrit.gerritPatchsetCheckout ([
-                            credentialsId : CREDENTIALS_ID
-                        ])
-                    } else{
-                        common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
-                    }
-                } else {
-                    git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
-                }
-            }
-
-            stage("Setup") {
-                python.setupCookiecutterVirtualenv(cutterEnv)
-            }
-
-            stage("Check workflow_definition") {
-                sh "python ${env.WORKSPACE}/workflow_definition_test.py"
-            }
-
-            def contextFiles
-            dir("${templateEnv}/contexts") {
-                contextFiles = findFiles(glob: "*.yml")
-            }
-
-            def contextFileList = []
-            for (int i = 0; i < contextFiles.size(); i++) {
-                contextFileList << contextFiles[i]
-            }
-
-            stage("generate-model") {
-                for (contextFile in contextFileList) {
-                    generateModel(contextFile, cutterEnv)
-                }
-            }
-
-            dir("${env.WORKSPACE}") {
-                sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
-                archiveArtifacts artifacts: "model.tar.gz"
-            }
-
-            stage("test-nodes") {
-                def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
-                def buildSteps = [:]
-                partitions.eachWithIndex { partition, i ->
-                    buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
-                    for(part in partition){
-                        def basename = sh(script: "basename ${part} .yml", returnStdout: true).trim()
-                        def testEnv = "${env.WORKSPACE}/model/${basename}"
-                        buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv, reclassVersion) })
-                    }
-                }
-                common.serial(buildSteps)
-            }
-
-            stage ('Clean workspace directories') {
-                sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
-            }
-
-        } catch (Throwable e) {
-             currentBuild.result = "FAILURE"
-             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-             throw e
-        } finally {
-             def dummy = "dummy"
-             //FAILING common.sendNotification(currentBuild.result,"",["slack"])
-        }
+def testModelStep(basename,testEnv) {
+  // We need to wrap what we return in a Groovy closure, or else it's invoked
+  // when this method is called, not when we pass it to parallel.
+  // To do this, you need to wrap the code below in { }, and either return
+  // that explicitly, or use { -> } syntax.
+  return {
+    node(slave_node) {
+      testModel(basename, testEnv)
     }
+  }
+}
+
+timeout(time: 2, unit: 'HOURS') {
+  node(slave_node) {
+    def templateEnv = "${env.WORKSPACE}"
+    def cutterEnv = "${env.WORKSPACE}/cutter"
+    def jinjaEnv = "${env.WORKSPACE}/jinja"
+
+    try {
+      // Fixme. Just use 'cleanup workspace' option.
+      stage("Cleanup") {
+        sh(script:  'find . -mindepth 1 -delete > /dev/null || true')
+      }
+
+      stage('Download Cookiecutter template') {
+        if (gerritRef) {
+          def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+          merged = gerritChange.status == "MERGED"
+          if (!merged) {
+            checkouted = gerrit.gerritPatchsetCheckout([
+              credentialsId: CREDENTIALS_ID
+              ])
+            } else {
+              common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
+            }
+            } else {
+              git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
+            }
+          }
+
+          stage("Setup") {
+            python.setupCookiecutterVirtualenv(cutterEnv)
+          }
+
+          stage("Check workflow_definition") {
+            sh(script: "python ${env.WORKSPACE}/workflow_definition_test.py")
+          }
+
+          def contextFileList = []
+          dir("${templateEnv}/contexts") {
+            for (String x : findFiles(glob: "*.yml")) {
+              contextFileList.add(x)
+            }
+          }
+
+          stage("generate-model") {
+            for (contextFile in contextFileList) {
+              generateModel(contextFile, cutterEnv)
+            }
+          }
+
+          dir("${env.WORKSPACE}") {
+          // Collect only models. For backward compatibility - who knows, probably someone uses it.
+          sh(script: "tar -czf model.tar.gz -C model ../contexts .", returnStatus: true)
+          archiveArtifacts artifacts: "model.tar.gz"
+          // to be able to share reclass for all subenvs
+          // Also, makes artifact test more solid - use one reclass for all of sub-models.
+          // Archive Structure will be:
+          // tar.gz
+          // ├── contexts
+          // │   └── ceph.yml
+          // ├── global_reclass <<< reclass system
+          // ├── model
+          // │   └── ceph       <<< from `context basename`
+          // │       ├── classes
+          // │       │   ├── cluster
+          // │       │   └── system -> ../../../global_reclass
+          // │       └── nodes
+          // │           └── cfg01.ceph-cluster-domain.local.yml
+
+          if (SYSTEM_GIT_URL == "") {
+            git.checkoutGitRepository("${env.WORKSPACE}/global_reclass/", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+            } else {
+              dir("${env.WORKSPACE}/global_reclass/") {
+                if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
+                  common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+                  throw new RuntimeException("Failed to obtain system reclass")
+                }
+              }
+            }
+            // link all models, to use one global reclass
+            for (String context : contextFileList) {
+              def basename = GetBaseName(context, '.yml')
+              dir("${env.WORKSPACE}/model/${basename}"){
+                sh(script: 'mkdir -p classes/; ln -sfv ../../../global_reclass classes/system ')
+              }
+            }
+            // Save all models and all contexts. Warning! `h` flag has been used.
+            sh(script: "tar -chzf reclass.tar.gz --exclude='*@tmp' model contexts global_reclass", returnStatus: true)
+            archiveArtifacts artifacts: "reclass.tar.gz"
+          }
+
+          stage("test-contexts") {
+            stepsForParallel = [:]
+            common.infoMsg("Found: ${contextFileList.size()} contexts to test.")
+            for (String context : contextFileList) {
+              def basename = GetBaseName(context, '.yml')
+              def testEnv = "${env.WORKSPACE}/model/${basename}"
+              stepsForParallel.put("Test:${basename}", testModelStep(basename, testEnv))
+            }
+            parallel stepsForParallel
+            common.infoMsg('All tests done')
+          }
+
+          stage('Clean workspace directories') {
+            sh(script:  'find . -mindepth 1 -delete > /dev/null || true')
+          }
+
+} catch (Throwable e) {
+  currentBuild.result = "FAILURE"
+  currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+  throw e
+  } finally {
+    def dummy = "dummy"
+      //FAILING common.sendNotification(currentBuild.result,"",["slack"])
+    }
+  }
 }
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index ca4eb67..0caef9c 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -4,28 +4,19 @@
  *  DEFAULT_GIT_URL
  *  CREDENTIALS_ID
  *  KITCHEN_TESTS_PARALLEL
- *  RUN_TEST_IN_DOCKER     If true, run test stage in docker
  *  SMOKE_TEST_DOCKER_IMG  Docker image for run test (default "ubuntu:16.04")
  */
 common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def ruby = new com.mirantis.mk.Ruby()
 
-def gerritRef
-try {
-  gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
-  gerritRef = null
-}
-
-def defaultGitRef, defaultGitUrl
-try {
-  defaultGitRef = DEFAULT_GIT_REF
-  defaultGitUrl = DEFAULT_GIT_URL
-} catch (MissingPropertyException e) {
-  defaultGitRef = null
-  defaultGitUrl = null
-}
+def gerritRef = env.GERRIT_REFSPEC ?: null
+def defaultGitRef = env.DEFAULT_GIT_REF ?: null
+def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
+def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+def saltVersion = env.SALT_VERSION ?: ""
+def dockerLib = new com.mirantis.mk.Docker()
+def img = dockerLib.getImage(env.SMOKE_TEST_DOCKER_IMG, "ubuntu:16.04")
 
 def checkouted = false
 
@@ -70,9 +61,13 @@
     [$class: 'StringParameterValue', name: 'SALT_VERSION', value: SALT_VERSION]
   ]
 }
-timeout(time: 12, unit: 'HOURS') {
-  node("python") {
+timeout(time: 2, unit: 'HOURS') {
+  node(slaveNode) {
     try {
+      if (fileExists("tests/build")) {
+        common.infoMsg('Cleaning test env')
+        sh ("sudo rm -rf tests/build")
+      }
       stage("checkout") {
         if (gerritRef) {
           // job is triggered by Gerrit
@@ -102,39 +97,39 @@
           throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
         }
     }
-    stage("test") {
-      if (checkouted) {
-        try {
-          saltVersion = SALT_VERSION
-            } catch (MissingPropertyException e) {
-          saltVersion = "" // default value is empty string, means latest
-        }
-        withEnv(["SALT_VERSION=${saltVersion}"]) {
-          boolean run_test_in_docker = (env.RUN_TEST_IN_DOCKER ?: false).toBoolean()
-          if (run_test_in_docker) {
-            def dockerLib = new com.mirantis.mk.Docker()
-            def img = dockerLib.getImage(env.SMOKE_TEST_DOCKER_IMG, "ubuntu:16.04")
-            def workspace = common.getWorkspace()
-            img.inside("-u root:root -v ${workspace}/:/formula/") {
-              sh("""cd /etc/apt/ && echo > sources.list \
-              && echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial main restricted universe multiverse" >> sources.list \
-              && echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial-updates main restricted universe multiverse" >> sources.list \
-              && echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial-backports main restricted universe multiverse" >> sources.list \
-              && echo 'Acquire::Languages "none";' > apt.conf.d/docker-no-languages \
-              && echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > apt.conf.d/docker-gzip-indexes \
-              && echo 'APT::Get::Install-Recommends "false"; APT::Get::Install-Suggests "false";' > apt.conf.d/docker-recommends \
-              && apt-get update \
-              && apt-get install -y git-core wget curl apt-transport-https \
-              && apt-get install -y python-pip python3-pip python-virtualenv python3-virtualenv python-yaml autoconf build-essential""")
-              sh("cd /formula/ && make clean && make test")
+      stage("test") {
+        if (checkouted) {
+          try {
+            // TODO add try/finally for image-stuck case. (copy-paste from SaltModelTesting)
+            withEnv(["SALT_VERSION=${saltVersion}"]) {
+              img.inside("-v ${env.WORKSPACE}/:/formula/ -u root:root --cpus=4 --ulimit nofile=4096:8192") {
+                sh('''#!/bin/bash -xe
+                      cd /etc/apt/
+                      echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial main restricted universe" > sources.list
+                      echo "deb [arch=amd64] http://cz.archive.ubuntu.com/ubuntu xenial-updates main restricted universe" >> sources.list
+                      echo 'Acquire::Languages "none";' > apt.conf.d/docker-no-languages
+                      echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > apt.conf.d/docker-gzip-indexes
+                      echo 'APT::Get::Install-Recommends "false"; APT::Get::Install-Suggests "false";' > apt.conf.d/docker-recommends
+                      apt-get update
+                      apt-get install -y git-core wget curl apt-transport-https
+                      apt-get install -y python-pip python3-pip python-virtualenv python3-virtualenv python-yaml autoconf build-essential
+                      cd /formula/
+                      make clean
+                      make test
+                      make clean
+                      ''')
+              }
             }
-          } else {
-            common.warningMsg("Those tests should be always be run in clean env! Recommends to use docker env!")
-            sh("make clean && make test")
+          }
+          finally {
+            if (fileExists("tests/build")) {
+              common.infoMsg('Cleaning test env')
+              sh ("sudo rm -rf tests/build")
+            }
           }
         }
+
       }
-    }
     stage("kitchen") {
         if (checkouted) {
           if (fileExists(".kitchen.yml")) {
diff --git a/update-package.groovy b/update-package.groovy
index 790e2ac..5be04a5 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -11,12 +11,10 @@
  *   TARGET_BATCH_LIVE          Batch size for the complete live package update on all nodes, empty string means apply to all targetted nodes.
  *
 **/
-
+pepperEnv = "pepperEnv"
+salt = new com.mirantis.mk.Salt()
 def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
 def targetTestSubset
 def targetLiveSubset
 def targetLiveAll
@@ -25,6 +23,10 @@
 def packages
 def command
 def commandKwargs
+def installSaltStack(target, pkgs){
+    salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 30)
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
         try {
@@ -89,6 +91,20 @@
             }
 
             stage('Apply package upgrades on sample') {
+                if(packages == null || packages.contains("salt-master") || packages.contains("salt-common") || packages.contains("salt-minion") || packages.contains("salt-api")){
+                    def saltTargets = (targetLiveSubset.split(' or ').collect{it as String})
+                    for(int i = 0; i < saltTargets.size(); i++ ){
+                        common.infoMsg("During salt-minion upgrade on cfg node, pipeline lose connectivy to salt-master for 2 min. If pipeline ended with error rerun pipeline again.")
+                        common.retry(10, 5) {
+                            if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:master and ${saltTargets[i]}")){
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                            }
+                            if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:minion and not I@salt:master and ${saltTargets[i]}")){
+                                installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
+                            }
+                        }
+                    }
+                }
                 out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
                 salt.printSaltCommandResult(out)
             }
@@ -100,6 +116,22 @@
             }
 
             stage('Apply package upgrades on all nodes') {
+
+                if(packages == null || packages.contains("salt-master") || packages.contains("salt-common") || packages.contains("salt-minion") || packages.contains("salt-api")){
+                    def saltTargets = (targetLiveAll.split(' or ').collect{it as String})
+                    for(int i = 0; i < saltTargets.size(); i++ ){
+                        common.infoMsg("During salt-minion upgrade on cfg node, pipeline lose connectivy to salt-master for 2 min. If pipeline ended with error rerun pipeline again.")
+                        common.retry(10, 5) {
+                            if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:master and ${saltTargets[i]}")){
+                                installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
+                            }
+                            if(salt.minionsReachable(pepperEnv, 'I@salt:master', "I@salt:minion and not I@salt:master and ${saltTargets[i]}")){
+                                installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
+                            }
+                        }
+                    }
+                }
+
                 out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
                 salt.printSaltCommandResult(out)
             }
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 62e5622..d5c0e77 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -46,7 +46,7 @@
     }
 
     def saltVersion = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_version").get("return")[0].values()[0]
-    def saltMinionVersions = salt.cmdRun(venvPepper, "*", "apt-cache policy salt-common |  awk '/Installed/ && /$saltVersion/'").get("return")
+    def saltMinionVersions = salt.cmdRun(venvPepper, target, "apt-cache policy salt-common |  awk '/Installed/ && /$saltVersion/'").get("return")
     def saltMinionVersion = ""
 
     for(minion in saltMinionVersions[0].keySet()){
@@ -64,7 +64,12 @@
     archiveArtifacts artifacts: "$filename"
 }
 
-timeout(time: 12, unit: 'HOURS') {
+def pipelineTimeout = 12
+if (common.validInputParam('PIPELINE_TIMEOUT') && PIPELINE_TIMEOUT.isInteger()) {
+    pipelineTimeout = "${PIPELINE_TIMEOUT}".toInteger()
+}
+
+timeout(time: pipelineTimeout, unit: 'HOURS') {
     node("python") {
         try {
             def gitMcpVersion = MCP_VERSION
@@ -87,7 +92,11 @@
                     catch(Exception ex){
                         error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
                     }
+                    def dateTime = common.getDatetime()
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
+                    common.infoMsg("The following changes were made to the cluster model and will be commited. Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
+                    salt.cmdRun(venvPepper, 'I@salt.master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
+                    salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && git add -u && git commit -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
                 }
 
                 try{
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 7d3b2e2..3c27dce 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -39,6 +39,7 @@
  *   GENERATE_REPORT             If not false, run report generation command
  *   ACCUMULATE_RESULTS          If true, results from the previous build will be used
  *   JOB_TIMEOUT                 Job timeout in hours
+ *   SKIP_LIST                   List of the Rally scenarios which should be skipped
  *
  */
 
@@ -93,7 +94,7 @@
                       rally_variables = ["plugins_repo":"${RALLY_PLUGINS_REPO}",
                                          "plugins_branch":"${RALLY_PLUGINS_BRANCH}"]
                     }
-                    validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir)
+                    validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir, SKIP_LIST)
                 } else {
                     common.infoMsg("Skipping Rally tests")
                 }