Merge "Print output of highstate"
diff --git a/.gitreview b/.gitreview
index 9075ea3..ce0aa41 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=gerrit.mcp.mirantis.net
+host=gerrit.mcp.mirantis.com
 port=29418
 project=mk/mk-pipelines.git
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
index f101f57..ea19c9d 100644
--- a/build-debian-packages-prometheus-relay.groovy
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -13,7 +13,7 @@
                 sh("rm -rf * || true")
             }
 
-            def workingDir = "src/gerrit.mcp.mirantis.net/debian"
+            def workingDir = "src/gerrit.mcp.mirantis.com/debian"
             stage("checkout") {
                 git.checkoutGitRepository(
                     "${workingDir}/prometheus-relay",
@@ -53,7 +53,7 @@
                             export GOROOT=\$PWD/go &&
                             export GOPATH=\$PWD &&
                             export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
-                            cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
+                            cd src/gerrit.mcp.mirantis.com/debian/prometheus-relay &&
                             make""")
                     }
                     archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index ce35b51..aadc7c9 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -423,6 +423,23 @@
                     }
 
                     orchestrate.installKubernetesCompute(venvPepper, extra_tgt)
+                    // Set up Kubernetes addons for OpenContrail. See the function definition for details.
+                    orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
+                }
+            }
+
+            // install ceph
+            if (common.checkContains('STACK_INSTALL', 'ceph')) {
+                stage('Install Ceph MONs') {
+                    orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
+                }
+
+                stage('Install Ceph OSDs') {
+                    orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
+                }
+
+                stage('Install Ceph clients') {
+                    orchestrate.installCephClient(venvPepper, extra_tgt)
                 }
             }
 
@@ -474,20 +491,8 @@
 
             }
 
-            // install ceph
+            // connect ceph
             if (common.checkContains('STACK_INSTALL', 'ceph')) {
-                stage('Install Ceph MONs') {
-                    orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
-                }
-
-                stage('Install Ceph OSDs') {
-                    orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
-                }
-
-
-                stage('Install Ceph clients') {
-                    orchestrate.installCephClient(venvPepper, extra_tgt)
-                }
 
                 stage('Connect Ceph') {
                     orchestrate.connectCeph(venvPepper, extra_tgt)
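
Taken together, the two cloud-deploy-pipeline.groovy hunks above move the Ceph MON/OSD/client installation so that it runs right after the Kubernetes compute setup, while the later ceph block is reduced to connecting the already-installed cluster. A minimal sketch of the resulting order of orchestrate calls when STACK_INSTALL contains both kubernetes and ceph, reusing the exact calls from the diff (venvPepper and extra_tgt are whatever the surrounding pipeline already defines):

    // Kubernetes computes, then the OpenContrail-specific addons
    orchestrate.installKubernetesCompute(venvPepper, extra_tgt)
    orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)

    // Ceph is now installed immediately afterwards in its own stages ...
    orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
    orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
    orchestrate.installCephClient(venvPepper, extra_tgt)

    // ... while the second 'ceph' block only wires it into the rest of the stack
    orchestrate.connectCeph(venvPepper, extra_tgt)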
diff --git a/cloud-update.groovy b/cloud-update.groovy
index be29675..d58d1e0 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -388,7 +388,7 @@
     } else {
         def salt = new com.mirantis.mk.Salt()
         for (s in services) {
-            def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, "${probe}*", "service --status-all | grep ${s} | awk \'{print \$4}\'"))
+            def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, probe, "service --status-all | grep ${s} | awk \'{print \$4}\'"))
             def servicesList = outputServicesStr.tokenize("\n").init() //init() returns the items from the Iterable excluding the last item
             if (servicesList) {
                 for (name in servicesList) {
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index 1fbd9f0..a39051f 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -9,94 +9,101 @@
  * REGISTRY_URL - Docker registry URL (can be empty)
  * ARTIFACTORY_URL - URL to artifactory
  * ARTIFACTORY_NAMESPACE - Artifactory namespace (oss, cicd,...)
+ * UPLOAD_TO_DOCKER_HUB    - True/False
  * REGISTRY_CREDENTIALS_ID - Docker hub credentials id
  *
-**/
+ **/
 
 def common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def git = new com.mirantis.mk.Git()
 def dockerLib = new com.mirantis.mk.Docker()
 def artifactory = new com.mirantis.mcp.MCPArtifactory()
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+uploadToDockerHub = (env.UPLOAD_TO_DOCKER_HUB ?: false).toBoolean()
+
 timeout(time: 12, unit: 'HOURS') {
-  node("docker") {
-    def workspace = common.getWorkspace()
-    def imageTagsList = IMAGE_TAGS.tokenize(" ")
-    try{
+    node(slaveNode) {
+        def workspace = common.getWorkspace()
+        def imageTagsList = env.IMAGE_TAGS.tokenize(" ")
+        try {
 
-      def buildArgs = []
-      try {
-        buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
-      } catch (Throwable e) {
-        buildArgs = []
-      }
-      def dockerApp
-        stage("checkout") {
-           git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
-        }
+            def buildArgs = []
+            try {
+                buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
+            } catch (Throwable e) {
+                buildArgs = []
+            }
+            def dockerApp
+            stage("checkout") {
+                git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
+            }
 
-        if (IMAGE_BRANCH == "master") {
-          try {
-            def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
-            def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
-            imageTagsList << tag
-            revision = revision ? revision : "0"
-            if(Integer.valueOf(revision) > 0){
-              imageTagsList << "${tag}-${revision}"
+            if (IMAGE_BRANCH == "master") {
+                try {
+                    def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
+                    def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
+                    imageTagsList << tag
+                    revision = revision ? revision : "0"
+                    if (Integer.valueOf(revision) > 0) {
+                        imageTagsList << "${tag}-${revision}"
+                    }
+                    if (!imageTagsList.contains("latest")) {
+                        imageTagsList << "latest"
+                    }
+                } catch (Exception e) {
+                    common.infoMsg("Unable to find any tag")
+                }
             }
-            if (!imageTagsList.contains("latest")) {
-              imageTagsList << "latest"
-              //workaround for all of our docker images
-              imageTagsList << "nightly"
-            }
-          } catch (Exception e) {
-            common.infoMsg("Impossible to find any tag")
-          }
-        }
 
-        stage("build") {
-          common.infoMsg("Building docker image ${IMAGE_NAME}")
-          dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
-          if(!dockerApp){
-            throw new Exception("Docker build image failed")
-          }
-        }
-        stage("upload to docker hub"){
-          docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
-            for(int i=0;i<imageTagsList.size();i++){
-              common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
-              dockerApp.push(imageTagsList[i])
+            stage("build") {
+                common.infoMsg("Building docker image ${IMAGE_NAME}")
+                dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
+                if (!dockerApp) {
+                    throw new Exception("Docker build image failed")
+                }
             }
-          }
+            stage("upload to docker hub") {
+                if (uploadToDockerHub) {
+                    docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
+                        for (int i = 0; i < imageTagsList.size(); i++) {
+                            common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
+                            dockerApp.push(imageTagsList[i])
+                        }
+                    }
+                } else {
+                    common.infoMsg('upload to docker hub skipped')
+                }
+            }
+            stage("upload to artifactory") {
+                if (common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
+                    def artifactoryName = "mcp-ci";
+                    def artifactoryServer = Artifactory.server(artifactoryName)
+                    def shortImageName = IMAGE_NAME
+                    if (IMAGE_NAME.contains("/")) {
+                        shortImageName = IMAGE_NAME.tokenize("/")[1]
+                    }
+                    for (imageTag in imageTagsList) {
+                        sh "docker tag ${IMAGE_NAME}:${imageTagsList[0]} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
+                        for (artifactoryRepo in ["docker-dev-local", "docker-prod-local"]) {
+                            common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTag} to artifactory ${artifactoryName} using repo ${artifactoryRepo}")
+                            artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
+                                "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
+                                imageTag, artifactoryRepo)
+                        }
+                    }
+                } else {
+                    common.warningMsg("ARTIFACTORY_URL not given, upload to artifactory skipped")
+                }
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        } finally {
+            common.sendNotification(currentBuild.result, "", ["slack"])
         }
-        stage("upload to artifactory"){
-          if(common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
-             def artifactoryName = "mcp-ci";
-             def artifactoryServer = Artifactory.server(artifactoryName)
-             def shortImageName = IMAGE_NAME
-             if (IMAGE_NAME.contains("/")) {
-                shortImageName = IMAGE_NAME.tokenize("/")[1]
-             }
-             for (imageTag in imageTagsList) {
-               sh "docker tag ${IMAGE_NAME} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
-               for(artifactoryRepo in ["docker-dev-local", "docker-prod-local"]){
-                 common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTag} to artifactory ${artifactoryName} using repo ${artifactoryRepo}")
-                 artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
-                                                   "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
-                                                   imageTag, artifactoryRepo)
-               }
-             }
-          }else{
-            common.warningMsg("ARTIFACTORY_URL not given, upload to artifactory skipped")
-          }
-        }
-    } catch (Throwable e) {
-       // If there was an error or exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    } finally {
-       common.sendNotification(currentBuild.result,"",["slack"])
     }
-  }
 }
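
The new UPLOAD_TO_DOCKER_HUB gate in docker-build-image-pipeline.groovy reads a job parameter from the environment, and Jenkins exposes boolean parameters as the strings 'true'/'false'. A minimal sketch of the conversion assumed above, with an explicit toBoolean() so that the string "false" is not treated as truthy (only the parameter name comes from the patch, the rest is illustrative):

    // Convert the string-valued job parameter into a real boolean before using it as a gate
    def uploadToDockerHub = (env.UPLOAD_TO_DOCKER_HUB ?: false).toBoolean()

    if (uploadToDockerHub) {
        // push every collected tag to the external registry
    } else {
        common.infoMsg('upload to docker hub skipped')
    }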
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index d88c9d1..4ccc74a 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -59,12 +59,18 @@
                     imageName = getImageName(sourceImage)
                     targetImageFull = "${targetRegistryPath}/${imageName}:${env.IMAGE_TAG}"
                     srcImage = docker.image(sourceImage)
-                    srcImage.pull()
+                    common.retry(3, 5) {
+                        srcImage.pull()
+                    }
                     // Use sh-docker call for tag, due magic code in plugin:
                     // https://github.com/jenkinsci/docker-workflow-plugin/blob/docker-workflow-1.17/src/main/resources/org/jenkinsci/plugins/docker/workflow/Docker.groovy#L168-L170
                     sh("docker tag ${srcImage.id} ${targetImageFull}")
                     common.infoMsg("Attempt to push docker image into remote registry: ${env.REGISTRY_URL}")
-                    sh("docker push ${targetImageFull}")
+                    common.retry(3, 5) {
+                        docker.withRegistry(env.REGISTRY_URL, env.TARGET_REGISTRY_CREDENTIALS_ID) {
+                            sh("docker push ${targetImageFull}")
+                        }
+                    }
                     if (targetImageFull.contains(externalMarker)) {
                         external = true
                     }
@@ -95,7 +101,9 @@
                                 common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
                                 // Call pipeline-library routine to set properties
                                 def mcp_artifactory = new com.mirantis.mcp.MCPArtifactory()
-                                mcp_artifactory.setProperties(imgUrl - '/manifest.json', artifactoryProperties)
+                                common.retry(3, 5) {
+                                    mcp_artifactory.setProperties(imgUrl - '/manifest.json', artifactoryProperties)
+                                }
                             }
                         }
                     }
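
All three docker-mirror-images.groovy changes wrap flaky network operations (image pull, registry push, Artifactory property update) in the shared retry helper. A short sketch of the pattern, assuming, as the calls above suggest, that the first argument is the number of attempts and the second the pause between them in seconds:

    def common = new com.mirantis.mk.Common()

    // Re-run the closure up to 3 times with a 5 second pause between attempts (assumed semantics);
    // transient registry failures then no longer fail the build on the first try.
    common.retry(3, 5) {
        srcImage.pull()
    }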
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index e42524b..aeaee9a 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -3,79 +3,105 @@
  * CREDENTIALS_ID - Gerrit credentails ID
  * JOBS_NAMESPACE - Gerrit gating jobs namespace (mk, contrail, ...)
  *
-**/
+ **/
 
 def common = new com.mirantis.mk.Common()
 def gerrit = new com.mirantis.mk.Gerrit()
 def ssh = new com.mirantis.mk.Ssh()
-timeout(time: 12, unit: 'HOURS') {
-  node("python") {
-    try{
-      // test if change is not already merged
-      ssh.prepareSshAgentKey(CREDENTIALS_ID)
-      ssh.ensureKnownHosts(GERRIT_HOST)
-      def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
-      def doSubmit = false
-      def giveVerify = false
-      stage("test") {
-        if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")){
-          // test max CodeReview
-          if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Code-Review", "+")){
-            doSubmit = true
-            def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
-            def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
-            def jobsNamespace = JOBS_NAMESPACE
-            def plural_namespaces = ['salt-formulas', 'salt-models']
-            // remove plural s on the end of job namespace
-            if (JOBS_NAMESPACE in plural_namespaces){
-              jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
-            }
-            // salt-formulas tests have -latest on end of the name
-            if(JOBS_NAMESPACE.equals("salt-formulas")){
-              gerritProject=gerritProject+"-latest"
-            }
-            def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
-            if (_jobExists(testJob)) {
-              common.infoMsg("Test job ${testJob} found, running")
-              def patchsetVerified =  gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")
-              build job: testJob, parameters: [
-                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
-                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
-              ]
-              giveVerify = true
-            } else {
-              common.infoMsg("Test job ${testJob} not found")
-            }
-          } else {
-            common.errorMsg("Change don't have a CodeReview, skipping gate")
-          }
-        } else {
-          common.infoMsg("Test job skipped")
-        }
-      }
-      stage("submit review"){
-        if(gerritChange.status == "MERGED"){
-          common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
-        }else if(doSubmit){
-          if(giveVerify){
-            common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} don't have a Verified, but tests were successful, so adding Verified and submitting")
-            ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
-          }else{
-            ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
-          }
-          common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
-        }
-      }
-    } catch (Throwable e) {
-       // If there was an error or exception thrown, the build failed
-       currentBuild.result = "FAILURE"
-       currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
-       throw e
-    }
-  }
-}
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+giveVerify = false
 
 @NonCPS
-def _jobExists(jobName){
-  return Jenkins.instance.items.find{it -> it.name.equals(jobName)}
+def isJobExists(jobName) {
+    return Jenkins.instance.items.find { it -> it.name.equals(jobName) }
+}
+
+def callJobWithExtraVars(String jobName) {
+    def gerritVars = '\n---'
+    for (envVar in env.getEnvironment()) {
+        if (envVar.key.startsWith("GERRIT_")) {
+            gerritVars += "\n${envVar.key}: '${envVar.value}'"
+        }
+    }
+    testJob = build job: jobName, parameters: [
+        [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: gerritVars]
+    ]
+    if (testJob.getResult() != 'SUCCESS') {
+        error("Gate job ${testJob.getBuildUrl().toString()} finished with ${testJob.getResult()}!")
+    }
+    giveVerify = true
+}
+
+
+timeout(time: 12, unit: 'HOURS') {
+    node(slaveNode) {
+        try {
+            // test if change is not already merged
+            ssh.prepareSshAgentKey(CREDENTIALS_ID)
+            ssh.ensureKnownHosts(GERRIT_HOST)
+            def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
+            def doSubmit = false
+            stage("test") {
+                if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")) {
+                    // test max CodeReview
+                    if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+                        doSubmit = true
+                        def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
+                        def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
+                        def jobsNamespace = JOBS_NAMESPACE
+                        def plural_namespaces = ['salt-formulas', 'salt-models']
+                        // remove the trailing 's' from the job namespace
+                        if (JOBS_NAMESPACE in plural_namespaces) {
+                            jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+                        }
+                        // salt-formulas tests have -latest at the end of the name
+                        if (JOBS_NAMESPACE.equals("salt-formulas")) {
+                            gerritProject = gerritProject + "-latest"
+                        }
+                        def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+                        if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
+                            callJobWithExtraVars('test-mk-cookiecutter-templates')
+                        } else if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+                            callJobWithExtraVars('test-salt-model-reclass-system')
+                        } else {
+                            if (isJobExists(testJob)) {
+                                common.infoMsg("Test job ${testJob} found, running")
+                                def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+                                build job: testJob, parameters: [
+                                    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+                                    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+                                ]
+                                giveVerify = true
+                            } else {
+                                common.infoMsg("Test job ${testJob} not found")
+                            }
+                        }
+                    } else {
+                        common.errorMsg("Change doesn't have a Code-Review, skipping gate")
+                    }
+                } else {
+                    common.infoMsg("Test job skipped")
+                }
+            }
+            stage("submit review") {
+                if (gerritChange.status == "MERGED") {
+                    common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate it")
+                } else if (doSubmit) {
+                    if (giveVerify) {
+                        common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} doesn't have a Verified, but tests were successful, so adding Verified and submitting")
+                        ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+                    } else {
+                        ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+                    }
+                    common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+                }
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        }
+    }
 }
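
callJobWithExtraVars() forwards the whole Gerrit trigger context to the downstream job as one multi-line text parameter instead of individual string parameters; the receiving pipelines (see test-cookiecutter-reclass.groovy further down) merge it back into their environment with common.mergeEnv(). A sketch of what EXTRA_VARIABLES_YAML ends up containing for a typical trigger; the project, change number and refspec shown are purely illustrative:

    // Value built by the loop over env.getEnvironment() above (example values only):
    //
    // ---
    // GERRIT_PROJECT: 'mk/cookiecutter-templates'
    // GERRIT_BRANCH: 'master'
    // GERRIT_CHANGE_NUMBER: '12345'
    // GERRIT_REFSPEC: 'refs/changes/45/12345/2'
    //
    // On the receiving side the YAML is turned back into environment variables:
    common.mergeEnv(env, env.EXTRA_VARIABLES_YAML)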
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 549a4d3..5e31d36 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -104,31 +104,19 @@
             for (product in productList) {
 
                 // get templateOutputDir and productDir
-                if (product.startsWith("stacklight")) {
-                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
-
-                    def stacklightVersion
-                    try {
-                        stacklightVersion = templateContext.default_context['stacklight_version']
-                    } catch (Throwable e) {
-                        common.warningMsg('Stacklight version loading failed')
-                    }
-
-                    if (stacklightVersion) {
-                        productDir = "stacklight" + stacklightVersion
-                    } else {
-                        productDir = "stacklight1"
-                    }
-
-                } else {
-                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                    productDir = product
+                templateOutputDir = "${env.WORKSPACE}/output/${product}"
+                productDir = product
+                templateDir = "${templateEnv}/cluster_product/${productDir}"
+                // Backward compatibility for 2018.8.1 and older releases
+                if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
+                    common.warningMsg("Old release detected! productDir => 'stacklight2' ")
+                    productDir = "stacklight2"
+                    templateDir = "${templateEnv}/cluster_product/${productDir}"
                 }
 
                 if (product == "infra" || (templateContext.default_context["${product}_enabled"]
                     && templateContext.default_context["${product}_enabled"].toBoolean())) {
 
-                    templateDir = "${templateEnv}/cluster_product/${productDir}"
                     common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
 
                     sh "rm -rf ${templateOutputDir} || true"
@@ -179,12 +167,12 @@
                     common.infoMsg("Attempt to run test against formula-version: ${mcpVersion}")
                     try {
                         def config = [
-                            'dockerHostname': "${saltMaster}.${clusterDomain}",
-                            'reclassEnv': testEnv,
-                            'formulasRevision': mcpVersion,
-                            'reclassVersion': reclassVersion,
+                            'dockerHostname'     : "${saltMaster}.${clusterDomain}",
+                            'reclassEnv'         : testEnv,
+                            'formulasRevision'   : mcpVersion,
+                            'reclassVersion'     : reclassVersion,
                             'dockerContainerName': DockerCName,
-                            'testContext': 'salt-model-node'
+                            'testContext'        : 'salt-model-node'
                         ]
                         testResult = saltModelTesting.testNode(config)
                         common.infoMsg("Test finished: SUCCESS")
@@ -201,7 +189,7 @@
 
                 // download create-config-drive
                 // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-                def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
+                def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
                 if (mcpCommonScriptsBranch == '') {
                     mcpCommonScriptsBranch = mcpVersion
                     // Don't have n/t/s for mcp-common-scripts repo, therefore use master
@@ -210,16 +198,21 @@
                         mcpCommonScriptsBranch = 'master'
                     }
                 }
-                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
-                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
-                common.retry(3, 5) {
-                    sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
-                    sh "wget -O user_data.sh ${user_data_script_url}"
-                }
+
+                def commonScriptsRepoUrl = 'https://gerrit.mcp.mirantis.com/mcp/mcp-common-scripts'
+                checkout([
+                    $class           : 'GitSCM',
+                    branches         : [[name: 'FETCH_HEAD'],],
+                    extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
+                    userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch],],
+                ])
+
+                sh "cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive"
+                sh "[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data"
 
                 sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
                 sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
-                args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+                args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
 
                 // load data from model
                 def smc = [:]
@@ -251,7 +244,7 @@
                 }
 
                 for (i in common.entries(smc)) {
-                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
+                    sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
                 }
 
                 // create cfg config-drive
@@ -263,8 +256,7 @@
 
                 if (templateContext['default_context']['local_repositories'] == 'True') {
                     def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                    def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
-                    sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
+                    sh "cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config.sh"
 
                     def smc_apt = [:]
                     smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
@@ -289,7 +281,6 @@
                 sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
                 archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
 
-
                 if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
                     emailext(to: EMAIL_ADDRESS,
                         attachmentsPattern: "output-${clusterName}/*",
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 3e7828b..530a256 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -10,6 +10,8 @@
  *   CTL_TARGET                 Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
  *   CMP_TARGET                 Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
  *   PER_NODE                   Target nodes will be managed one by one (bool)
+ *   SIMPLE_UPGRADE             Use the previous upgrade flow without cordon/drain abilities
+ *   UPGRADE_DOCKER             Upgrade docker component
  *
 **/
 def common = new com.mirantis.mk.Common()
@@ -50,6 +52,60 @@
     }
 }
 
+def cordonNode(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def originalTarget = "I@kubernetes:master and not ${target}"
+
+    stage("Cordoning ${target} kubernetes node") {
+        def nodeShortName = target.tokenize(".")[0]
+        salt.cmdRun(pepperEnv, originalTarget, "kubectl cordon ${nodeShortName}", true, 1)
+    }
+}
+
+def uncordonNode(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def originalTarget = "I@kubernetes:master and not ${target}"
+
+    stage("Uncordoning ${target} kubernetes node") {
+        def nodeShortName = target.tokenize(".")[0]
+        salt.cmdRun(pepperEnv, originalTarget, "kubectl uncordon ${nodeShortName}", true, 1)
+    }
+}
+
+def drainNode(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+    def originalTarget = "I@kubernetes:master and not ${target}"
+
+    stage("Draining ${target} kubernetes node") {
+        def nodeShortName = target.tokenize(".")[0]
+        salt.cmdRun(pepperEnv, originalTarget, "kubectl drain --force --ignore-daemonsets --grace-period 100 --timeout 300s --delete-local-data ${nodeShortName}", true, 1)
+    }
+}
+
+def regenerateCerts(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Regenerate certs for ${target}") {
+        salt.enforceState(pepperEnv, target, 'salt.minion.cert')
+    }
+}
+
+def updateAddons(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Upgrading Addons at ${target}") {
+        salt.enforceState(pepperEnv, target, "kubernetes.master.kube-addons")
+        salt.enforceState(pepperEnv, target, "kubernetes.master.setup")
+    }
+}
+
+def upgradeDocker(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Upgrading docker at ${target}") {
+        salt.enforceState(pepperEnv, target, 'docker.host')
+    }
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node() {
@@ -73,7 +129,19 @@
                     def targetHosts = salt.getMinionsSorted(pepperEnv, target)
 
                     for (t in targetHosts) {
-                        performKubernetesControlUpdate(pepperEnv, t)
+                        if (SIMPLE_UPGRADE.toBoolean()) {
+                            performKubernetesControlUpdate(pepperEnv, t)
+                        } else {
+                            cordonNode(pepperEnv, t)
+                            drainNode(pepperEnv, t)
+                            regenerateCerts(pepperEnv, t)
+                            if (UPGRADE_DOCKER.toBoolean()) {
+                                upgradeDocker(pepperEnv, t)
+                            }
+                            performKubernetesControlUpdate(pepperEnv, t)
+                            updateAddons(pepperEnv, t)
+                            uncordonNode(pepperEnv, t)
+                        }
                     }
                 } else {
                     performKubernetesControlUpdate(pepperEnv, target)
@@ -87,7 +155,18 @@
                     def targetHosts = salt.getMinionsSorted(pepperEnv, target)
 
                     for (t in targetHosts) {
-                        performKubernetesComputeUpdate(pepperEnv, t)
+                        if (SIMPLE_UPGRADE.toBoolean()) {
+                            performKubernetesComputeUpdate(pepperEnv, t)
+                        } else {
+                            cordonNode(pepperEnv, t)
+                            drainNode(pepperEnv, t)
+                            regenerateCerts(pepperEnv, t)
+                            if (UPGRADE_DOCKER.toBoolean()) {
+                                upgradeDocker(pepperEnv, t)
+                            }
+                            performKubernetesComputeUpdate(pepperEnv, t)
+                            uncordonNode(pepperEnv, t)
+                        }
                     }
                 } else {
                     performKubernetesComputeUpdate(pepperEnv, target)
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index af96600..4d9d498 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -33,7 +33,7 @@
 def CONTROL_PKGS = 'contrail-config contrail-config-openstack contrail-control contrail-dns contrail-lib contrail-nodemgr contrail-utils contrail-web-controller contrail-web-core neutron-plugin-contrail python-contrail'
 def ANALYTIC_PKGS = 'contrail-analytics contrail-lib contrail-nodemgr contrail-utils python-contrail'
 def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms contrail-nova-driver'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service supervisor-vrouter start'
 
 def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
 
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 76243e5..52a0d23 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,13 +26,16 @@
 def command = 'cmd.shell'
 
 def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def thirdPartyControlPkgsToRemove = 'redis-server,ifmap-server,supervisor'
 def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+def thirdPartyAnalyticsPkgsToRemove = 'redis-server,supervisor'
 //def cmpPkgs = ['contrail-lib', 'contrail-nodemgr', 'contrail-utils', 'contrail-vrouter-agent', 'contrail-vrouter-utils', 'python-contrail', 'python-contrail-vrouter-api', 'python-opencontrail-vrouter-netns', 'contrail-vrouter-dkms']
 def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
-def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper']
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service contrail-vrouter-agent start; service contrail-vrouter-nodemgr start'
+def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper', 'redis-server']
 def configServices = ['contrail-webui-jobserver', 'contrail-webui-webserver', 'supervisor-config', 'supervisor-database', 'zookeeper']
-def controlServices = ['ifmap-server', 'supervisor-control']
+def controlServices = ['ifmap-server', 'supervisor-control', 'redis-server']
+def thirdPartyServicesToDisable = ['kafka', 'zookeeper', 'cassandra']
 def config4Services = ['zookeeper', 'contrail-webui-middleware', 'contrail-webui', 'contrail-api', 'contrail-schema', 'contrail-svc-monitor', 'contrail-device-manager', 'contrail-config-nodemgr', 'contrail-database']
 
 def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
@@ -107,7 +110,7 @@
                     common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
                     throw er
                 }
-                
+
                 salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
 
                 try {
@@ -167,14 +170,14 @@
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zoopeeker'])
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
-            //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs])
-            //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs])
-            for (service in controlServices) {
+            for (service in (controlServices + thirdPartyServicesToDisable)) {
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
             }
-            for (service in analyticsServices) {
+            for (service in (analyticsServices + thirdPartyServicesToDisable)) {
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
-                }
+            }
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs + ',' + thirdPartyControlPkgsToRemove])
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs + ',' + thirdPartyAnalyticsPkgsToRemove])
         }
 
 
@@ -305,6 +308,12 @@
 
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+                for (service in (controlServices + thirdPartyServicesToDisable)) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.enable', [service])
+                }
+                for (service in (analyticsServices + thirdPartyServicesToDisable)) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.enable', [service])
+                }
             }
         }
 
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 6a6eea2..5febb3c 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -159,6 +159,7 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }
 
diff --git a/promote-vcp-images.groovy b/promote-vcp-images.groovy
index 181eafa..7b4f80e 100644
--- a/promote-vcp-images.groovy
+++ b/promote-vcp-images.groovy
@@ -17,6 +17,8 @@
 slaveNode = env.SLAVE_NODE ?: 'jsl23.mcp.mirantis.net'
 def job_env = env.getEnvironment().findAll { k, v -> v }
 def verify = job_env.VERIFY_DOWNLOAD ?: true
+def overwrite = (job_env.FORCE_OVERWRITE ?: false).toBoolean()
+
 
 
 timeout(time: 6, unit: 'HOURS') {
@@ -91,7 +93,7 @@
                         remoteImageStatus = ''
                         remoteImageStatus = sh(script: "wget  --auth-no-challenge --spider ${targetImageUrl} 2>/dev/null", returnStatus: true)
                         // wget return code 8 ,if file not exist
-                        if (remoteImageStatus != '8') {
+                        if (remoteImageStatus != 8 && !overwrite) {
                             error("Attempt to overwrite existing release! Target: ${targetImage} already exist!")
                         }
                     }
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 4cae93c..470f338 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -46,7 +46,7 @@
         [$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
         [$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
         [$class: 'StringParameterValue', name: 'IMAGE_TAG', value: targetTag],
-        [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList],
+        [$class: 'TextParameterValue', name: 'IMAGE_LIST', value: imageList],
         [$class: 'StringParameterValue', name: 'SOURCE_IMAGE_TAG', value: sourceImageTag],
     ]
 }
@@ -67,7 +67,7 @@
 
 def triggerGitTagJob(gitRepoList, gitCredentials, tag, sourceTag) {
     build job: "tag-git-repos-all", parameters: [
-        [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
+        [$class: 'TextParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
         [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
         [$class: 'StringParameterValue', name: 'TAG', value: tag],
         [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
@@ -76,9 +76,10 @@
 
 def triggerPromoteVCPJob(VcpImageList, tag, sourceTag) {
     build job: "promote-vcp-images-all", parameters: [
-        [$class: 'StringParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
+        [$class: 'TextParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
         [$class: 'StringParameterValue', name: 'TAG', value: tag],
-        [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag]
+        [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
+        [$class: 'BooleanParameterValue', name: 'FORCE_OVERWRITE', value: true],
     ]
 }
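
FORCE_OVERWRITE ties the two promotion changes together: release-mcp-version.groovy now passes it as a boolean parameter to promote-vcp-images-all, and promote-vcp-images.groovy only raises the "attempt to overwrite" error when the flag is off and wget's exit code is not 8 (code 8 means the remote file does not exist, so any other code indicates the target is already published). A condensed view of that flow, built from the lines already present in this patch:

    // release-mcp-version.groovy: request a promotion that may overwrite existing targets
    build job: "promote-vcp-images-all", parameters: [
        [$class: 'BooleanParameterValue', name: 'FORCE_OVERWRITE', value: true],
        // ... TAG, SOURCE_TAG and VCP_IMAGE_LIST as above ...
    ]

    // promote-vcp-images.groovy: sh(returnStatus: true) yields an Integer exit code,
    // so the comparison is against the number 8, not the string '8'
    def overwrite = (job_env.FORCE_OVERWRITE ?: false).toBoolean()
    if (remoteImageStatus != 8 && !overwrite) {
        error("Attempt to overwrite existing release! Target: ${targetImage} already exist!")
    }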
 
diff --git a/sync-http-to-s3.groovy b/sync-http-to-s3.groovy
new file mode 100644
index 0000000..108a394
--- /dev/null
+++ b/sync-http-to-s3.groovy
@@ -0,0 +1,29 @@
+def common = new com.mirantis.mk.Common()
+
+
+node("docker") {
+    stage('Prepare') {
+        img = docker.image(IMAGE)
+        img.pull()
+    }
+    stage('Upload') {
+        FILENAMES.split().each { filename ->
+            url = "${SOURCE}/${filename}"
+            img.withRun("--entrypoint='/bin/bash'") { c ->
+                withCredentials([[$class          : 'UsernamePasswordMultiBinding', credentialsId: 'aws-s3',
+                                  usernameVariable: 'S3_ACCESS_KEY', passwordVariable: 'S3_SECRET_KEY']]) {
+                    img.inside("-e S3_ACCESS_KEY=${S3_ACCESS_KEY} -e S3_SECRET_KEY=${S3_SECRET_KEY}") {
+                        common.retry(3, 5) {
+                            sh(script: "wget --progress=dot:giga -O ${filename} ${url}", returnStdout: true)
+                            sh(script: "/usr/local/bin/s4cmd put ${filename} ${DEST}/${filename}", returnStdout: true)
+                        }
+                    }
+                }
+
+
+            }
+            sh("rm ${filename}")
+        }
+    }
+    deleteDir()
+}
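
sync-http-to-s3.groovy is a new helper job: for every name in FILENAMES it downloads ${SOURCE}/<name> inside the container given by IMAGE and pushes it to ${DEST} with s4cmd, using the hard-coded 'aws-s3' Jenkins credentials. A hypothetical set of job parameters, purely to illustrate the expected shapes (none of these values appear in the patch):

    // Illustrative values only - the real ones are defined on the Jenkins job:
    // IMAGE     = 'registry.example.com/mirantis/s4cmd:latest'   // image must provide /usr/local/bin/s4cmd
    // SOURCE    = 'http://images.example.com/releases/2019.2.0'  // HTTP base URL to download from
    // FILENAMES = 'cfg01-day01.qcow2 cfg01-day01.qcow2.md5'      // whitespace-separated file names
    // DEST      = 's3://example-bucket/releases/2019.2.0'        // s4cmd destination prefix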
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index b380bfd..6f73570 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -17,13 +17,19 @@
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
 
-slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+def extraVarsYAML = env.EXTRA_VARIABLES_YAML ?: false
+if (extraVarsYAML) {
+    common.mergeEnv(env, extraVarsYAML)
+}
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+checkIncludeOrder = env.CHECK_INCLUDE_ORDER ?: false
 
 // Global var's
 alreadyMerged = false
 gerritConData = [credentialsId       : env.CREDENTIALS_ID,
                  gerritName          : env.GERRIT_NAME ?: 'mcp-jenkins',
-                 gerritHost          : env.GERRIT_HOST ?: 'gerrit.mcp.mirantis.net',
+                 gerritHost          : env.GERRIT_HOST ?: 'gerrit.mcp.mirantis.com',
                  gerritScheme        : env.GERRIT_SCHEME ?: 'ssh',
                  gerritPort          : env.GERRIT_PORT ?: '29418',
                  gerritRefSpec       : null,
@@ -31,16 +37,20 @@
                  withWipeOut         : true,
                  GERRIT_CHANGE_NUMBER: null]
 //
-//ccTemplatesRepo = env.COOKIECUTTER_TEMPLATE_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates'
+//ccTemplatesRepo = env.COOKIECUTTER_TEMPLATE_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates'
+gerritDataCCHEAD = [:]
 gerritDataCC = [:]
 gerritDataCC << gerritConData
 gerritDataCC['gerritBranch'] = env.COOKIECUTTER_TEMPLATE_BRANCH ?: 'master'
+gerritDataCC['gerritRefSpec'] = env.COOKIECUTTER_TEMPLATE_REF ?: null
 gerritDataCC['gerritProject'] = 'mk/cookiecutter-templates'
 //
-//reclassSystemRepo = env.RECLASS_SYSTEM_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system'
+//reclassSystemRepo = env.RECLASS_SYSTEM_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system'
+gerritDataRSHEAD = [:]
 gerritDataRS = [:]
 gerritDataRS << gerritConData
 gerritDataRS['gerritBranch'] = env.RECLASS_MODEL_BRANCH ?: 'master'
+gerritDataRS['gerritRefSpec'] = env.RECLASS_SYSTEM_GIT_REF ?: null
 gerritDataRS['gerritProject'] = 'salt-models/reclass-system'
 
 // version of debRepos, aka formulas\reclass
@@ -98,22 +108,18 @@
         for (product in productList) {
 
             // get templateOutputDir and productDir
-            if (product.startsWith("stacklight")) {
-                templateOutputDir = "${templateEnvDir}/output/stacklight"
-                try {
-                    productDir = "stacklight" + templateContext.default_context['stacklight_version']
-                } catch (Throwable e) {
-                    productDir = "stacklight1"
-                }
-            } else {
-                templateOutputDir = "${templateEnvDir}/output/${product}"
-                productDir = product
+            templateOutputDir = "${templateEnvDir}/output/${product}"
+            productDir = product
+            templateDir = "${templateEnvDir}/cluster_product/${productDir}"
+            // Backward compatibility for 2018.8.1 and older releases
+            if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
+                common.warningMsg("Old release detected! productDir => 'stacklight2' ")
+                productDir = "stacklight2"
+                templateDir = "${templateEnvDir}/cluster_product/${productDir}"
             }
-
             if (product == "infra" || (templateContext.default_context["${product}_enabled"]
                 && templateContext.default_context["${product}_enabled"].toBoolean())) {
 
-                templateDir = "${templateEnvDir}/cluster_product/${productDir}"
                 common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
 
                 sh "rm -rf ${templateOutputDir} || true"
@@ -134,7 +140,7 @@
     return {
         dir(copyTo) {
             copyArtifacts(projectName: jobName, selector: specific(build), filter: "nodesinfo.tar.gz")
-            sh "tar -xvf nodesinfo.tar.gz"
+            sh "tar -xf nodesinfo.tar.gz"
             sh "rm -v nodesinfo.tar.gz"
         }
     }
@@ -220,51 +226,50 @@
     // Simple function, to check and define branch-around variables
     // In general, simply make transition updates for non-master branch
     // based on magic logic
-    def message = ''
+    def message = '<br/>'
     if (env.GERRIT_PROJECT) {
         // TODO are we going to have such branches?
         if (!['nightly', 'testing', 'stable', 'proposed', 'master'].contains(env.GERRIT_BRANCH)) {
             gerritDataCC['gerritBranch'] = env.GERRIT_BRANCH
             gerritDataRS['gerritBranch'] = env.GERRIT_BRANCH
-            // 'binary' branch logic w\o 'release/' prefix
-            testDistribRevision = env.GERRIT_BRANCH.split('/')[-1]
-            // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
-            if (!common.checkRemoteBinary([apt_mk_version: testDistribRevision]).linux_system_repo_url) {
-                common.errorMsg("Binary release: ${testDistribRevision} not exist. Fallback to 'proposed'! ")
-                testDistribRevision = 'proposed'
-            }
+            testDistribRevision = env.GERRIT_BRANCH
         }
         // Identify, who triggered. To whom we should pass refspec
         if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
             gerritDataRS['gerritRefSpec'] = env.GERRIT_REFSPEC
             gerritDataRS['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
-            message = "<br/>RECLASS_SYSTEM_GIT_REF =>${gerritDataRS['gerritRefSpec']}"
+            message = message + "<br/>RECLASS_SYSTEM_GIT_REF =>${gerritDataRS['gerritRefSpec']}"
         } else if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
             gerritDataCC['gerritRefSpec'] = env.GERRIT_REFSPEC
             gerritDataCC['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
-            message = "<br/>COOKIECUTTER_TEMPLATE_REF =>${gerritDataCC['gerritRefSpec']}"
+            message = message + "<br/>COOKIECUTTER_TEMPLATE_REF =>${gerritDataCC['gerritRefSpec']}"
         } else {
             error("Unsuported gerrit-project triggered:${env.GERRIT_PROJECT}")
         }
-
         message = "<font color='red'>GerritTrigger detected! We are in auto-mode:</font>" +
             "<br/>Test env variables has been changed:" +
             "<br/>COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}" +
-            "<br/>DISTRIB_REVISION =>${testDistribRevision}" +
             "<br/>RECLASS_MODEL_BRANCH=> ${gerritDataRS['gerritBranch']}" + message
-        common.warningMsg(message)
-        currentBuild.description = currentBuild.description ? message + "<br/>" + currentBuild.description : message
     } else {
-        // Check for passed variables:
-        if (env.RECLASS_SYSTEM_GIT_REF) {
-            gerritDataRS['gerritRefSpec'] = RECLASS_SYSTEM_GIT_REF
-        }
-        if (env.COOKIECUTTER_TEMPLATE_REF) {
-            gerritDataCC['gerritRefSpec'] = COOKIECUTTER_TEMPLATE_REF
-        }
-        message = "<font color='red'>Manual run detected!</font>" + "<br/>"
-        currentBuild.description = currentBuild.description ? message + "<br/>" + currentBuild.description : message
+        message = "<font color='red'>Non-gerrit trigger run detected!</font>" + message
     }
+    gerritDataCCHEAD << gerritDataCC
+    gerritDataCCHEAD['gerritRefSpec'] = null
+    gerritDataCCHEAD['GERRIT_CHANGE_NUMBER'] = null
+    gerritDataRSHEAD << gerritDataRS
+    gerritDataRSHEAD['gerritRefSpec'] = null
+    gerritDataRSHEAD['GERRIT_CHANGE_NUMBER'] = null
+    // 'binary' branch logic w/o 'release/' prefix
+    if (testDistribRevision.contains('/')) {
+        testDistribRevision = testDistribRevision.split('/')[-1]
+    }
+    // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
+    if (!common.checkRemoteBinary([apt_mk_version: testDistribRevision]).linux_system_repo_url) {
+        common.errorMsg("Binary release: ${testDistribRevision} does not exist. Falling back to 'proposed'!")
+        testDistribRevision = 'proposed'
+        message = "<br/>DISTRIB_REVISION =>${testDistribRevision}" + message
+    }
+    currentBuild.description = currentBuild.description ? message + currentBuild.description : message
 }
 
 def replaceGeneratedValues(path) {
@@ -293,35 +298,32 @@
     // tar.gz
     // ├── contexts
     // │   └── ceph.yml
-    // ├── global_reclass <<< reclass system
+    // ├── ${reclassDirName} <<< reclass system
     // ├── model
     // │   └── ceph       <<< from `context basename`
     // │       ├── classes
     // │       │   ├── cluster
-    // │       │   └── system -> ../../../global_reclass
+    // │       │   └── system -> ../../../${reclassDirName}
     // │       └── nodes
     // │           └── cfg01.ceph-cluster-domain.local.yml
     dir(envPath) {
         for (String context : contextList) {
             def basename = common.GetBaseName(context, '.yml')
             dir("${envPath}/model/${basename}") {
-                sh(script: 'mkdir -p classes/; ln -sfv ../../../../global_reclass classes/system ')
+                sh(script: "mkdir -p classes/; ln -sfv ../../../../${common.GetBaseName(archiveName, '.tar.gz')} classes/system ")
             }
         }
         // replace all generated passwords/secrets/keys with hardcode value for infra/secrets.yaml
         replaceGeneratedValues("${envPath}/model")
-        // Save all models and all contexts. Warning! `h` flag must be used.
-        sh(script: "set -ex; tar -chzf ${archiveName} --exclude='*@tmp' model contexts", returnStatus: true)
-        archiveArtifacts artifacts: archiveName
-        // move for "Compare Pillars" stage
-        sh(script: "mv -v ${archiveName} ${env.WORKSPACE}")
+        // Save all models and all contexts. Warning! `h` flag must be used!
+        sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' model contexts", returnStatus: true)
     }
+    archiveArtifacts artifacts: archiveName
 }
 
 timeout(time: 1, unit: 'HOURS') {
     node(slaveNode) {
         globalVariatorsUpdate()
-        def gerritDataCCHEAD = [:]
         def templateEnvHead = "${env.WORKSPACE}/EnvHead/"
         def templateEnvPatched = "${env.WORKSPACE}/EnvPatched/"
         def contextFileListHead = []
@@ -338,16 +340,24 @@
                 // Prepare 2 env - for patchset, and for HEAD
                 def paralellEnvs = [:]
                 paralellEnvs.failFast = true
-                paralellEnvs['downloadEnvPatched'] = StepPrepareGit(templateEnvPatched, gerritDataCC)
-                gerritDataCCHEAD << gerritDataCC
-                gerritDataCCHEAD['gerritRefSpec'] = null; gerritDataCCHEAD['GERRIT_CHANGE_NUMBER'] = null
                 paralellEnvs['downloadEnvHead'] = StepPrepareGit(templateEnvHead, gerritDataCCHEAD)
-                parallel paralellEnvs
+                if (gerritDataCC.get('gerritRefSpec', null)) {
+                    paralellEnvs['downloadEnvPatched'] = StepPrepareGit(templateEnvPatched, gerritDataCC)
+                    parallel paralellEnvs
+                } else {
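+                    // No cookiecutter patchset to test: prepare only the HEAD env and copy it to the patched path below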
+                    paralellEnvs['downloadEnvPatched'] = { common.warningMsg('No need to process: downloadEnvPatched') }
+                    parallel paralellEnvs
+                    sh("rsync -a --exclude '*@tmp' ${templateEnvHead} ${templateEnvPatched}")
+                }
             }
             stage("Check workflow_definition") {
                 // Check only for patchset
                 python.setupVirtualenv(vEnv, 'python2', [], "${templateEnvPatched}/requirements.txt")
-                common.infoMsg(python.runVirtualenvCommand(vEnv, "python ${templateEnvPatched}/workflow_definition_test.py"))
+                if (gerritDataCC.get('gerritRefSpec', null)) {
+                    common.infoMsg(python.runVirtualenvCommand(vEnv, "python ${templateEnvPatched}/workflow_definition_test.py"))
+                } else {
+                    common.infoMsg('No need to process: workflow_definition')
+                }
             }
 
             stage("generate models") {
@@ -364,18 +374,29 @@
                 // Generate over 2env's - for patchset, and for HEAD
                 def paralellEnvs = [:]
                 paralellEnvs.failFast = true
-                paralellEnvs['GenerateEnvPatched'] = StepGenerateModels(contextFileListPatched, vEnv, templateEnvPatched)
                 paralellEnvs['GenerateEnvHead'] = StepGenerateModels(contextFileListHead, vEnv, templateEnvHead)
-                parallel paralellEnvs
-
-                // Collect artifacts
-                dir(templateEnvPatched) {
-                    // Collect only models. For backward comparability - who know, probably someone use it..
-                    sh(script: "tar -czf model.tar.gz -C model ../contexts .", returnStatus: true)
-                    archiveArtifacts artifacts: "model.tar.gz"
+                if (gerritDataCC.get('gerritRefSpec', null)) {
+                    paralellEnvs['GenerateEnvPatched'] = StepGenerateModels(contextFileListPatched, vEnv, templateEnvPatched)
+                    parallel paralellEnvs
+                } else {
+                    paralellEnvs['GenerateEnvPatched'] = { common.warningMsg('No need to process: GenerateEnvPatched') }
+                    parallel paralellEnvs
+                    sh("rsync -a --exclude '*@tmp' ${templateEnvHead} ${templateEnvPatched}")
                 }
 
-                StepPrepareGit("${env.WORKSPACE}/global_reclass/", gerritDataRS).call()
+                // We need two git checkouts: one for HEAD, one for the patched refspec.
+                // If there is no patch, use HEAD for both.
+                RSHeadDir = common.GetBaseName(headReclassArtifactName, '.tar.gz')
+                RSPatchedDir = common.GetBaseName(patchedReclassArtifactName, '.tar.gz')
+                common.infoMsg("gerritDataRS= ${gerritDataRS}")
+                common.infoMsg("gerritDataRSHEAD= ${gerritDataRSHEAD}")
+                if (gerritDataRS.get('gerritRefSpec', null)) {
+                    StepPrepareGit("${env.WORKSPACE}/${RSPatchedDir}/", gerritDataRS).call()
+                    StepPrepareGit("${env.WORKSPACE}/${RSHeadDir}/", gerritDataRSHEAD).call()
+                } else {
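+                    // No reclass-system patch: check out HEAD once and symlink it as the patched copy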
+                    StepPrepareGit("${env.WORKSPACE}/${RSHeadDir}/", gerritDataRS).call()
+                    sh("cd ${env.WORKSPACE} ; ln -svf ${RSHeadDir} ${RSPatchedDir}")
+                }
                 // link all models, to use one global reclass
                 // For HEAD
                 linkReclassModels(contextFileListHead, templateEnvHead, headReclassArtifactName)
@@ -392,7 +413,7 @@
                    tar -xzf ${headReclassArtifactName}  --directory ${compareRoot}/old
                    """)
                 common.warningMsg('infra/secrets.yml has been skipped from compare!')
-                result = '\n' + common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml\'")
+                result = '\n' + common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml|\\.git\'")
                 currentBuild.description = currentBuild.description ? currentBuild.description + result : result
             }
             stage("TestContexts Head/Patched") {
@@ -427,6 +448,49 @@
                 result = '\n' + common.comparePillars(reclassNodeInfoDir, env.BUILD_URL, '')
                 currentBuild.description = currentBuild.description ? currentBuild.description + result : result
             }
+            stage('Check include order') {
+                if (!checkIncludeOrder) {
+                    common.infoMsg('The include order check requires too much time and is currently disabled!')
+
+                } else {
+                    def correctIncludeOrder = ["service", "system", "cluster"]
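+                    // e.g. a node listing classes ['service.foo', 'system.bar', 'cluster.baz'] passes this check,
+                    // while ['cluster.baz', 'service.foo'] would be flagged (hypothetical class names)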
+                    dir(reclassInfoPatchedPath) {
+                        def nodeInfoFiles = findFiles(glob: "**/*.reclass.nodeinfo")
+                        def messages = ["<b>Wrong include ordering found</b><ul>"]
+                        def stepsForParallel = [:]
+                        nodeInfoFiles.each { nodeInfo ->
+                            stepsForParallel.put("Checking ${nodeInfo.path}:", {
+                                def node = readYaml file: nodeInfo.path
+                                def classes = node['classes']
+                                def curClassID = 0
+                                def prevClassID = 0
+                                def wrongOrder = false
+                                for (String className in classes) {
+                                    def currentClass = className.tokenize('.')[0]
+                                    curClassID = correctIncludeOrder.indexOf(currentClass)
+                                    if (currentClass != correctIncludeOrder[prevClassID]) {
+                                        if (prevClassID > curClassID) {
+                                            wrongOrder = true
+                                            common.warningMsg("File ${nodeInfo.path} contains wrong order of classes including: Includes for ${className} should be declared before ${correctIncludeOrder[prevClassID]} includes")
+                                        } else {
+                                            prevClassID = curClassID
+                                        }
+                                    }
+                                }
+                                if (wrongOrder) {
+                                    messages.add("<li>${nodeInfo.path} contains a wrong class include order</li>")
+                                }
+                            })
+                        }
+                        parallel stepsForParallel
+                        def includerOrder = '<b>No wrong include order</b>'
+                        if (messages.size() != 1) {
+                            includerOrder = messages.join('')
+                        }
+                        currentBuild.description = currentBuild.description ? currentBuild.description + includerOrder : includerOrder
+                    }
+                }
+            }
             sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
 
         } catch (Throwable e) {
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
new file mode 100644
index 0000000..c57e67d
--- /dev/null
+++ b/test-openscap-pipeline.groovy
@@ -0,0 +1,282 @@
+/**
+ *
+ * Run openscap xccdf evaluation on given nodes
+ *
+ * Expected parameters:
+ *  SALT_MASTER_URL             Full Salt API address.
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API.
+ *
+ *  XCCDF_BENCHMARKS_DIR        The XCCDF benchmarks base directory (default /usr/share/xccdf-benchmarks/mirantis/)
+ *  XCCDF_BENCHMARKS            List of pairs of an XCCDF benchmark filename and its corresponding profile, separated with ',';
+ *                                  the pairs themselves are separated with a semicolon
+ *                                  (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile)
+ *  XCCDF_VERSION               The XCCDF version (default 1.2)
+ *  XCCDF_TAILORING_ID          The tailoring id (default None)
+ *
+ *  TARGET_SERVERS              The target Salt nodes (default *)
+ *
+ *  ARTIFACTORY_URL             The artifactory URL
+ *  ARTIFACTORY_NAMESPACE       The artifactory namespace (default 'mirantis/openscap')
+ *  ARTIFACTORY_REPO            The artifactory repo (default 'binary-dev-local')
+ *
+ *  UPLOAD_TO_DASHBOARD         Boolean. Whether to upload results to the WORP dashboard
+ *  DASHBOARD_API_URL           The WORP api base url. Mandatory if UPLOAD_TO_DASHBOARD is true
+ */
+
+
+
+/**
+  * Upload results to the `WORP` dashboard
+  *
+  * @param apiUrl               The base dashboard api url
+  * @param cloudName            The cloud name (usually the given node's domain name)
+  * @param nodeName             The node name
+  * @param reportType           Type of the report to create/use, either 'openscap' or 'cve'
+  * @param reportId             Report Id to re-use, if empty report will be created
+  * @param results              The scanning results as a json file content (string)
+  * @return reportId            The id of the newly created report if the incoming reportId was empty, otherwise the incoming reportId
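+  *
+  * Example call (hypothetical values):
+  *   reportId = uploadResultToDashboard('http://worp.example:8888/api/v1', 'example.local',
+  *                                      'cmp001.example.local', 'openscap', '', resultsJson)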
+  */
+def uploadResultToDashboard(apiUrl, cloudName, nodeName, reportType, reportId, results) {
+    def common = new com.mirantis.mk.Common()
+    def http = new com.mirantis.mk.Http()
+
+    // Yes, we do not care about performance here and will create at least 4 requests per result
+    def requestData = [:]
+
+    def cloudId
+    def nodeId
+
+    def worpApi = [:]
+    worpApi["url"] = apiUrl
+
+    // Let's take a look, maybe our minion is already present on the dashboard
+    // Get available environments
+    common.infoMsg("Making GET to ${worpApi.url}/environment/")
+    environments = http.restGet(worpApi, "/environment/")
+    for (environment in environments) {
+        if (environment['name'] == cloudName) {
+            cloudId = environment['uuid']
+            break
+        }
+    }
+    // The cloud is not present yet, let's create it
+    if (! cloudId ) {
+        // Create cloud
+        requestData = [:]
+        requestData['name'] = cloudName
+        common.infoMsg("Making POST to ${worpApi.url}/environment/ with ${requestData}")
+        cloudId = http.restPost(worpApi, "/environment/", requestData)['env']['uuid']
+
+        // And the node
+        // This is done here to reduce the number of requests to the API:
+        // if the cloud was not present on the dashboard, then the node was not present either.
+        requestData = [:]
+        requestData['nodes'] = [nodeName]
+        common.infoMsg("Making PUT to ${worpApi.url}/environment/${cloudId}/nodes/ with ${requestData}")
+        nodeId = http.restCall(worpApi, "/environment/${cloudId}/nodes/", "PUT", requestData)['uuid']
+    }
+
+    if (! nodeId ) {
+        // Get available nodes in our environment
+        common.infoMsg("Making GET to ${worpApi.url}/environment/${cloudId}/nodes/")
+        nodes = http.restGet(worpApi, "/environment/${cloudId}/nodes/")
+        for (node in nodes) {
+            if (node['name'] == nodeName) {
+                nodeId = node['uuid']
+                break
+            }
+        }
+    }
+
+    // The node is not present yet, let's create it
+    if (! nodeId ) {
+        // Create node
+        requestData = [:]
+        requestData['nodes'] = [nodeName]
+        common.infoMsg("Making PUT to ${worpApi.url}/environment/${cloudId}/nodes/ with ${requestData}")
+        nodeId = http.restCall(worpApi, "/environment/${cloudId}/nodes/", "PUT", requestData)['uuid']
+    }
+
+    // Create report if needed
+    if (! reportId ) {
+        requestData = [:]
+        requestData['env_uuid'] = cloudId
+        common.infoMsg("Making POST to ${worpApi.url}/reports/${reportType}/ with ${requestData}")
+        reportId = http.restPost(worpApi, "/reports/${reportType}/", requestData)['report']['uuid']
+    }
+
+    // Upload results
+    // NOTE(pas-ha) results should already be a dict with 'results' key
+    requestData = common.parseJSON(results)
+    requestData['node_name'] = nodeName
+    common.infoMsg("First result in results to PUT is ${requestData['results'][0]}")
+    // NOTE(pas-ha) not logging whole results to be sent, is too large and just spams the logs
+    common.infoMsg("Making PUT to ${worpApi.url}/reports/${reportType}/${reportId}/ with node name ${requestData['node_name']} and results")
+    http.restCall(worpApi, "/reports/${reportType}/${reportId}/", "PUT", requestData)
+    return reportId
+}
+
+
+node('python') {
+    def pepperEnv = 'pepperEnv'
+
+    // XCCDF related variables
+    def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
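+    // e.g. 'manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile'
+    // (see XCCDF_BENCHMARKS above) yields two benchmark/profile pairs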
+    def benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/'
+    def xccdfVersion = XCCDF_VERSION ?: '1.2'
+    def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+    def targetServers = TARGET_SERVERS ?: '*'
+
+    def salt = new com.mirantis.mk.Salt()
+    def python = new com.mirantis.mk.Python()
+    def common = new com.mirantis.mk.Common()
+    def http = new com.mirantis.mk.Http()
+
+    // To be able to work under heavy concurrency
+    def scanUUID = UUID.randomUUID().toString()
+
+    def artifactsArchiveName = "openscap-${scanUUID}.zip"
+    def resultsBaseDir = "/var/log/openscap/${scanUUID}"
+    def artifactsDir = "openscap"
+
+    def liveMinions
+
+
+    stage ('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    stage ('Run openscap xccdf evaluation and attempt to upload the results to a dashboard') {
+        liveMinions = salt.getMinions(pepperEnv, targetServers)
+
+        if (liveMinions.isEmpty()) {
+            throw new Exception('There are no live minions')
+        }
+
+        common.infoMsg("Scan UUID: ${scanUUID}")
+
+        // Clean up all previous results before collecting results from each minion
+        dir(artifactsDir) {
+            deleteDir()
+        }
+
+        def reportId
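+        // reportId is created on the first upload and then reused, so all results of this run land in a single report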
+        for (minion in liveMinions) {
+
+            // Iterate oscap evaluation over the benchmarks
+            for (benchmark in benchmarksAndProfilesArray) {
+                def (benchmarkFilePath, profile) = benchmark.tokenize(',').collect({it.trim()})
+
+                // Remove extension from the benchmark name
+                def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
+
+                // Get benchmark name
+                def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
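+                // e.g. 'manila/openstack_manila-xccdf.xml' -> 'manila/openstack_manila-xccdf',
+                // so benchmarkName becomes 'openstack_manila-xccdf'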
+
+                // And build resultsDir based on this path
+                def resultsDir = "${resultsBaseDir}/${benchmarkPathWithoutExtension}"
+
+                def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
+
+                def nodeShortName = minion.tokenize('.')[0]
+
+                def archiveName = "${scanUUID}_${nodeShortName}_${benchmarkName}.tar"
+
+                // Evaluate the benchmark
+                salt.runSaltProcessStep(pepperEnv, minion, 'oscap.eval', [
+                    'xccdf', benchmarkFile, "results_dir=${resultsDir}",
+                    "profile=${profile}", "xccdf_version=${xccdfVersion}",
+                    "tailoring_id=${xccdfTailoringId}"
+                ])
+
+                salt.cmdRun(pepperEnv, minion, "tar -cf /tmp/${archiveName} -C ${resultsBaseDir} .")
+                fileContents = salt.cmdRun(pepperEnv, minion, "cat /tmp/${archiveName}", true, null, false)['return'][0].values()[0].replaceAll('Salt command execution success', '')
+
+                sh "mkdir -p ${artifactsDir}/${scanUUID}/${nodeShortName}"
+                writeFile file: "${archiveName}", text: fileContents
+                sh "tar --strip-components 1 -xf ${archiveName} --directory ${artifactsDir}/${scanUUID}/${nodeShortName}; rm -f ${archiveName}"
+
+                // Remove the archive, which is not needed anymore
+                salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', "/tmp/${archiveName}")
+
+                // Attempt to upload the scanning results to the dashboard
+                if (UPLOAD_TO_DASHBOARD.toBoolean()) {
+                    if (common.validInputParam('DASHBOARD_API_URL')) {
+                        def cloudName = salt.getGrain(pepperEnv, minion, 'domain')['return'][0].values()[0].values()[0]
+                        reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, "openscap", reportId, salt.getFileContent(pepperEnv, minion, "${resultsDir}/results.json"))
+                    } else {
+                        throw new Exception('Uploading to the dashboard is enabled but the DASHBOARD_API_URL was not set')
+                    }
+                }
+            }
+        }
+
+        // Prepare archive
+        sh "tar -cJf ${artifactsDir}.tar.xz ${artifactsDir}"
+
+        // Archive the build output artifacts
+        archiveArtifacts artifacts: "*.xz"
+    }
+
+/*  // Will be implemented later
+    stage ('Attempt to upload results to an artifactory') {
+        if (common.validInputParam('ARTIFACTORY_URL')) {
+            for (minion in liveMinions) {
+                def destDir = "${artifactsDir}/${minion}"
+                def archiveName = "openscap-${scanUUID}.tar.gz"
+                def tempArchive = "/tmp/${archiveName}"
+                def destination = "${destDir}/${archiveName}"
+
+                dir(destDir) {
+                    // Archive scanning results on the remote target
+                    salt.runSaltProcessStep(pepperEnv, minion, 'archive.tar', ['czf', tempArchive, resultsBaseDir])
+
+                    // Get its content and save it
+                    writeFile file: destination, text: salt.getFileContent(pepperEnv, minion, tempArchive)
+
+                    // Remove scanning results and the temp archive on the remote target
+                    salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', resultsBaseDir)
+                    salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', tempArchive)
+                }
+            }
+
+            def artifactory = new com.mirantis.mcp.MCPArtifactory()
+            def artifactoryName = 'mcp-ci'
+            def artifactoryRepo = ARTIFACTORY_REPO ?: 'binary-dev-local'
+            def artifactoryNamespace = ARTIFACTORY_NAMESPACE ?: 'mirantis/openscap'
+            def artifactoryServer = Artifactory.server(artifactoryName)
+            def publishInfo = true
+            def buildInfo = Artifactory.newBuildInfo()
+            def zipName = "${env.WORKSPACE}/openscap/${scanUUID}/results.zip"
+
+            // Zip scan results
+            zip zipFile: zipName, archive: false, dir: artifactsDir
+
+            // Mandatory and additional properties
+            def properties = artifactory.getBinaryBuildProperties([
+                                "scanUuid=${scanUUID}",
+                                "project=openscap"
+                            ])
+
+            // Build Artifactory spec object
+            def uploadSpec = """{
+                "files":
+                    [
+                        {
+                            "pattern": "${zipName}",
+                            "target": "${artifactoryRepo}/${artifactoryNamespace}/openscap",
+                            "props": "${properties}"
+                        }
+                    ]
+                }"""
+
+            // Upload artifacts to the given Artifactory
+            artifactory.uploadBinariesToArtifactory(artifactoryServer, buildInfo, uploadSpec, publishInfo)
+
+        } else {
+            common.warningMsg('ARTIFACTORY_URL was not given, skip uploading to artifactory')
+        }
+    }
+*/
+
+}
diff --git a/test-reclass-package.groovy b/test-reclass-package.groovy
new file mode 100644
index 0000000..109d986
--- /dev/null
+++ b/test-reclass-package.groovy
@@ -0,0 +1,45 @@
+/**
+ * Check new Reclass version against current model.
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
+ *   SALT_MASTER_URL                    Full Salt API address [http://10.10.10.1:8000].
+ *   DISTRIB_REVISION                   Mirror version to use
+ *   EXTRA_REPO_PREDEFINED              Use mcp extra repo defined on host
+ *   EXTRA_REPO                         Extra repo to use in format (for example, deb [arch=amd64] http://apt.mirantis.com/xenial/ nightly extra)
+ *   EXTRA_REPO_GPG_KEY_URL             GPG key URL for extra repo
+ *   TARGET_NODES                       Target specification, e.g. 'I@openssh:server'
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def saltModel = new com.mirantis.mk.SaltModelTesting()
+def python = new com.mirantis.mk.Python()
+
+def env = "env"
+def extraRepo = env.EXTRA_REPO
+def extraRepoKey = env.EXTRA_REPO_GPG_KEY_URL
+def targetNodes = env.TARGET_NODES
+def distribRevision = env.DISTRIB_REVISION
+def usePredefinedExtra = env.EXTRA_REPO_PREDEFINED
+node('cfg') {
+
+    stage('Setup virtualenv for Pepper') {
+      python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    def minions = salt.getMinionsSorted(venvPepper, targetNodes)
+    if (usePredefinedExtra) {
+      def mcp_extra = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:repo:mcp_extra").get("return")[0].values()[0]
+      extraRepoKey = mcp_extra['key_url']
+      extraRepo = mcp_extra['source']
+    }
+    def config = [
+      'distribRevision': distribRevision,
+      'targetNodes': minions,
+      'extraRepo': extraRepo,
+      'extraRepoKey': extraRepoKey,
+      'venv': venvPepper
+    ]
+    saltModel.compareReclassVersions(config)
+}
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index f4467c1..729fdb4 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -61,36 +61,26 @@
 common = new com.mirantis.mk.Common()
 
 def setupRunner() {
-
-  def branches = [:]
-  for (int i = 0; i < PARALLEL_NODE_GROUP_SIZE.toInteger() && i < futureNodes.size(); i++) {
-    branches["Runner ${i}"] = {
-      while (futureNodes && !failedNodes) {
-        def currentNode = futureNodes[0] ? futureNodes[0] : null
+    def branches = [:]
+    branches.failFast = true
+    for(int i = 0; i < futureNodes.size(); i++) {
+        def currentNode = futureNodes[i] ? futureNodes[i] : null
         if (!currentNode) {
-          continue
+            continue
         }
-
-        def clusterName = currentNode[2]
-        futureNodes.remove(currentNode)
-        try {
-            triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
-        } catch (Exception e) {
-          if (e.getMessage().contains("completed with status ABORTED")) {
-            common.warningMsg("Test of ${clusterName} failed because the test was aborted :  ${e}")
-            futureNodes << currentNode
-          } else {
-            common.warningMsg("Test of ${clusterName} failed :  ${e}")
-            failedNodes = true
-          }
+        branches["Runner ${i}"] = {
+            try {
+                triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
+            } catch (Exception e) {
+                  common.warningMsg("Test of ${currentNode[2]} failed :  ${e}")
+                  throw e
+            }
         }
-      }
     }
-  }
 
-  if (branches) {
-    parallel branches
-  }
+    if (branches) {
+        common.runParallel(branches, PARALLEL_NODE_GROUP_SIZE.toInteger())
+    }
 }
 
 def triggerTestNodeJob(defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource) {
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index afd2857..04eafeb 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -1,35 +1,31 @@
 def gerrit = new com.mirantis.mk.Gerrit()
 def common = new com.mirantis.mk.Common()
 
-
-slaveNode = env.SLAVE_NODE ?: 'python&&docker'
-
-def gerritCredentials
-try {
-    gerritCredentials = CREDENTIALS_ID
-} catch (MissingPropertyException e) {
-    gerritCredentials = "gerrit"
+// extraVarsYaml contains GERRIT_ vars from the gate job
+// or will contain GERRIT_ vars from the reclass-system patch
+def extraVarsYaml = env.EXTRA_VARIABLES_YAML ?: ''
+if (extraVarsYaml != '') {
+    common.mergeEnv(env, extraVarsYaml)
+} else {
+    extraVarsYaml = '\n---'
+    for (envVar in env.getEnvironment()) {
+        if (envVar.key.startsWith("GERRIT_")) {
+            extraVarsYaml += "\n${envVar.key}: '${envVar.value}'"
+        }
+    }
 }
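+// e.g. with a hypothetical GERRIT_BRANCH='master' in the environment, the fallback above
+// yields "\n---\nGERRIT_BRANCH: 'master'" for the downstream jobs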
 
-def gerritRef
-try {
-    gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
-    gerritRef = null
-}
+def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+def gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
 
-def defaultGitRef, defaultGitUrl
-try {
-    defaultGitRef = DEFAULT_GIT_REF
-    defaultGitUrl = DEFAULT_GIT_URL
-} catch (MissingPropertyException e) {
-    defaultGitRef = null
-    defaultGitUrl = null
-}
+def gerritRef = env.GERRIT_REFSPEC ?: null
+def defaultGitRef = env.DEFAULT_GIT_REF ?: null
+def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
+
 def checkouted = false
 def merged = false
 def systemRefspec = "HEAD"
-def formulasRevision = 'testing'
+
 timeout(time: 12, unit: 'HOURS') {
     node(slaveNode) {
         try {
@@ -67,35 +63,36 @@
 
                         def branches = [:]
                         def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
-                        for (int i = 0; i < testModels.size(); i++) {
-                            def cluster = testModels[i]
-                            def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
-                            branches["${cluster}"] = {
-                                build job: "test-salt-model-${cluster}", parameters: [
-                                    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
-                                    [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
-                                    [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
-                                    [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec],
-                                    [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: formulasRevision],
-                                ]
+                        if (['master'].contains(env.GERRIT_BRANCH)) {
+                            for (int i = 0; i < testModels.size(); i++) {
+                                def cluster = testModels[i]
+                                def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
+                                branches["${cluster}"] = {
+                                    build job: "test-salt-model-${cluster}", parameters: [
+                                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+                                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+                                        [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+                                        [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+                                    ]
+                                }
                             }
+                        } else {
+                            common.warningMsg("Tests for ${testModels} skipped!")
                         }
                         branches["cookiecutter"] = {
                             build job: "test-mk-cookiecutter-templates", parameters: [
                                 [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_URL', value: defaultGitUrl],
                                 [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_GIT_REF', value: systemRefspec],
-                                [$class: 'StringParameterValue', name: 'DISTRIB_REVISION', value: formulasRevision]
-
+                                [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVarsYaml ]
                             ]
                         }
                         parallel branches
                     } else {
-                        throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
+                        error("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
                     }
                 }
             }
         } catch (Throwable e) {
-            // If there was an error or exception thrown, the build failed
             currentBuild.result = "FAILURE"
             currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
             throw e