Merge "Add conformance execution for containerD cases"
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index b68bc81..1d12d97 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -28,22 +28,24 @@
 timeout(time: 12, unit: 'HOURS') {
     node("docker&&hardware") {
         try {
+            if ("testing" in TARGET && !jenkinsUtils.currentUserInGroup(["release-engineering", "aptly-promote-users"])) {
+                insufficientPermissions = true
+                throw new Exception("Only release-engineering or aptly-promote-users can perform promote to testing.")
+            } else if (!jenkinsUtils.currentUserInGroup(["release-engineering"])) {
+                insufficientPermissions = true
+                throw new Exception("Only release-engineering team can perform promote.")
+            }
             stage("promote") {
                 // promote is restricted to users in aptly-promote-users LDAP group
-                if (jenkinsUtils.currentUserInGroups(["mcp-cicd-admins", "aptly-promote-users"])) {
-                    lock("aptly-api") {
-                        for (storage in storages) {
-                            if (storage == "local") {
-                                storage = ""
-                            }
-                            retry(2) {
-                                aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
-                            }
+                lock("aptly-api") {
+                    for (storage in storages) {
+                        if (storage == "local") {
+                            storage = ""
+                        }
+                        retry(2) {
+                            aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
                         }
                     }
-                } else {
-                    insufficientPermissions = true
-                    throw new Exception(String.format("You don't have permissions to make aptly promote from source:%s to target:%s! Only CI/CD and QA team can perform aptly promote.", SOURCE, TARGET))
                 }
             }
         } catch (Throwable e) {
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 636c666..0de5590 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -37,6 +37,30 @@
     }
 }
 
+def getImageInfo(String imageName) {
+    String unique_image_id = sh(
+        script: "docker inspect --format='{{index .RepoDigests 0}}' '${imageName}'",
+        returnStdout: true,
+    ).trim()
+    String imageSha256 = unique_image_id.tokenize(':')[1]
+    common.infoMsg("Docker ${imageName} image sha256 is ${imageSha256}")
+    return [ 'id': unique_image_id, 'sha256': imageSha256 ]
+}
+
+def imageURL(String registry, String imageName, String sha256) {
+    def ret = new URL("https://${registry}/artifactory/api/search/checksum?sha256=${sha256}").getText()
+    // Most probably we will get many images, especially for external ones. We need to pick
+    // exactly the one we are pushing now
+    def tgtGuessImage = imageName.replace(':', '/').replace(registry, '')
+    ArrayList img_data = new JsonSlurper().parseText(ret)['results']
+    def tgtImgUrl = img_data*.uri.find { it.contains(tgtGuessImage) }
+    if (tgtImgUrl) {
+        return tgtImgUrl
+    } else {
+        error("Can't find image ${imageName} in registry ${registry} with sha256: ${sha256}!")
+    }
+}
+
 timeout(time: 4, unit: 'HOURS') {
     node(slaveNode) {
         def user = ''
@@ -78,6 +102,7 @@
                     common.retry(3, 5) {
                         srcImage.pull()
                     }
+                    source_image_sha256 = getImageInfo(sourceImage)['sha256']
                     // Use sh-docker call for tag, due magic code in plugin:
                     // https://github.com/jenkinsci/docker-workflow-plugin/blob/docker-workflow-1.17/src/main/resources/org/jenkinsci/plugins/docker/workflow/Docker.groovy#L168-L170
                     sh("docker tag ${srcImage.id} ${targetImageFull}")
@@ -92,18 +117,10 @@
                     if (setDefaultArtifactoryProperties) {
                         common.infoMsg("Processing artifactory props for : ${targetImageFull}")
                         LinkedHashMap artifactoryProperties = [:]
-                        // Get digest of pushed image
-                        String unique_image_id = sh(
-                            script: "docker inspect --format='{{index .RepoDigests 0}}' '${targetImageFull}'",
-                            returnStdout: true,
-                        ).trim()
-                        def image_sha256 = unique_image_id.tokenize(':')[1]
-                        def ret = new URL("https://${targetRegistry}/artifactory/api/search/checksum?sha256=${image_sha256}").getText()
-                        // Most probably, we would get many images, especially for external images. We need to guess
-                        // exactly one, which we pushing now
-                        guessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
-                        ArrayList img_data = new JsonSlurper().parseText(ret)['results']
-                        def imgUrl = img_data*.uri.find { it.contains(guessImage) } - '/manifest.json'
+                        def tgtImageInfo = getImageInfo(targetImageFull)
+                        def tgt_image_sha256 = tgtImageInfo['sha256']
+                        def unique_image_id = tgtImageInfo['id']
+                        def tgtImgUrl = imageURL(targetRegistry, targetImageFull, tgt_image_sha256) - '/manifest.json'
                         artifactoryProperties = [
                             'com.mirantis.targetTag'    : env.IMAGE_TAG,
                             'com.mirantis.uniqueImageId': unique_image_id,
@@ -111,9 +128,11 @@
                         if (external) {
                             artifactoryProperties << ['com.mirantis.externalImage': external]
                         }
-                        def existingProps = mcp_artifactory.getPropertiesForArtifact(imgUrl)
+                        def sourceRegistry = sourceImage.split('/')[0]
+                        def sourceImgUrl = imageURL(sourceRegistry, sourceImage, source_image_sha256) - '/manifest.json'
+                        def existingProps = mcp_artifactory.getPropertiesForArtifact(sourceImgUrl)
                         def historyProperties = []
-                        // check does image have already some props
+                        // check whether the source image already has history props
                         if (existingProps) {
                             historyProperties = existingProps.get('com.mirantis.versionHistory', [])
                         }
@@ -122,7 +141,7 @@
                         artifactoryProperties << [ 'com.mirantis.versionHistory': historyProperties.join(',') ]
                         common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
                         common.retry(3, 5) {
-                            mcp_artifactory.setProperties(imgUrl, artifactoryProperties)
+                            mcp_artifactory.setProperties(tgtImgUrl, artifactoryProperties)
                         }
                     }
                 }
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 15518d4..d449cd8 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -19,9 +19,10 @@
 }
 
 def callJobWithExtraVars(String jobName) {
-    def gerritVars = JsonOutput.toJson(env.getEnvironment().findAll{ it.key.startsWith('GERRIT_') })
+    def gerritVars = env.getEnvironment().findAll{ it.key.startsWith('GERRIT_') }
+    gerritVars['GERRIT_CI_MERGE_TRIGGER'] = true
     testJob = build job: jobName, parameters: [
-        [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: gerritVars]
+        [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: JsonOutput.toJson(gerritVars) ]
     ]
     if (testJob.getResult() != 'SUCCESS') {
         error("Gate job ${testJob.getBuildUrl().toString()}  finished with ${testJob.getResult()} !")
@@ -56,10 +57,8 @@
                             gerritProject = gerritProject + "-latest"
                         }
                         def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
-                        if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
-                            callJobWithExtraVars('test-mk-cookiecutter-templates')
-                        } else if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
-                            callJobWithExtraVars('test-salt-model-reclass-system')
+                        if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+                            callJobWithExtraVars('test-salt-model-ci-wrapper')
                         } else {
                             if (isJobExists(testJob)) {
                                 common.infoMsg("Test job ${testJob} found, running")
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 515fcb6..33a3f60 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -73,6 +73,11 @@
         common.warningMsg("Binary release: ${distribRevision} not exist. Fallback to 'proposed'! ")
         distribRevision = 'proposed'
     }
+    // (azvyagintsev) WA for PROD-25732
+    if (context.cookiecutter_template_url.contains('gerrit.mcp.mirantis.com/mk/cookiecutter-templates')) {
+        common.warningMsg('Apply WA for PROD-25732')
+        context.cookiecutter_template_url = 'ssh://gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates.git'
+    }
     common.warningMsg("Fetching:\n" +
         "DISTRIB_REVISION from ${distribRevision}")
     common.infoMsg("Using context:\n" + context)
@@ -246,7 +251,7 @@
                 sh(returnStatus: true, script: "tar -czf output-${context['cluster_name']}/${context['cluster_name']}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
                 archiveArtifacts artifacts: "output-${context['cluster_name']}/${context['cluster_name']}.tar.gz"
 
-                if(RequesterEmail != '' && !RequesterEmail.contains('example')){
+                if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
                     emailext(to: RequesterEmail,
                         attachmentsPattern: "output-${context['cluster_name']}/*",
                         body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 46a846e..19cf47d 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -24,6 +24,8 @@
  *   CALICO_UPGRADE_VERSION     Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
  *
 **/
+import groovy.json.JsonSlurper
+
 def common = new com.mirantis.mk.Common()
 def salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
@@ -342,6 +344,85 @@
     }
 }
 
+def checkCalicoPolicySetting(pepperEnv, target) {
+    def common = new com.mirantis.mk.Common()
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Checking of Calico network policy setting") {
+        // check Calico policy enabled
+        def cniPolicy = false
+        def addonsPolicy = false
+        def kubeCtrlRunning = false
+
+        // check CNI config
+        def cniCfgResult = salt.cmdRun(
+                pepperEnv, target, "cat /etc/cni/net.d/10-calico.conf"
+            )['return'][0].values()[0].toString()
+        def cniCfg = new JsonSlurper().parseText(cniCfgResult)
+        if (cniCfg.get("policy") != null) {
+            if (cniCfg["policy"].get("type") == "k8s") {
+                cniPolicy = true
+            } else {
+                common.warningMsg("Calico policy type is unknown or not set.")
+            }
+        }
+
+        // check k8s addons
+        def addonsResult = salt.cmdRun(
+                pepperEnv, target, "ls /etc/kubernetes/addons"
+            )['return'][0].values()[0].toString()
+        if (addonsResult.contains("calico_policy")) {
+            addonsPolicy = true
+        }
+
+        // check kube-controllers is running
+        def kubeCtrlResult = salt.cmdRun(
+                pepperEnv, target, "kubectl get pod -n kube-system --selector=k8s-app=calico-kube-controllers"
+            )['return'][0].values()[0].toString()
+        if (kubeCtrlResult.contains("Running")) {
+            kubeCtrlRunning = true
+        }
+
+        // It's safe to enable Calico policy any time, but it may be unsafe to disable it.
+        // So, no need to disable Calico policy for v3.x if it's not in use currently.
+        // But if Calico policy is in use already, it should be enabled after upgrade as well.
+
+        // check for consistency
+        if ((cniPolicy != addonsPolicy) || (addonsPolicy != kubeCtrlRunning)) {
+            caution = "ATTENTION. Calico policy setting cannot be determined reliably (enabled in CNI config: ${cniPolicy}, " +
+                "presence in k8s addons: ${addonsPolicy}, kube-controllers is running: ${kubeCtrlRunning})."
+            currentBuild.description += "<br><b>${caution}</b><br><br>"
+            common.warningMsg(caution)
+        } else {
+            common.infoMsg("Current Calico policy state is detected as: ${cniPolicy}")
+            if (cniPolicy) {
+                // Calico policy is in use. Check policy setting for v3.x.
+                common.infoMsg("Calico policy is in use. It should be enabled for v3.x as well.")
+                def saltPolicyResult = salt.getPillar(
+                        pepperEnv, target, "kubernetes:pool:network:calico:policy"
+                    )["return"][0].values()[0].toString()
+
+                common.infoMsg("kubernetes.pool.network.calico.policy: ${saltPolicyResult}")
+                if (saltPolicyResult.toLowerCase().contains("true")) {
+                    common.infoMsg("Calico policy setting for v3.x is detected as: true")
+                } else {
+                    caution = "ATTENTION. Currently, Calico is running with policy switched on. " +
+                        "Calico policy setting for v3.x is not set to true. " +
+                        "After upgrade is completed, Calico policy will be switched off. " +
+                        "You will need to switch it on manually if required."
+                    currentBuild.description += "<br><b>${caution}</b><br><br>"
+                    common.warningMsg(caution)
+                }
+            }
+        }
+
+        if (addonsPolicy) {
+            // Remove v2.6.x policy-related addons on masters to not interfere with v3.x kube-controllers
+            salt.cmdRun(pepperEnv, CTL_TARGET, "rm -rf /etc/kubernetes/addons/calico_policy")
+        }
+    }
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
         try {
@@ -408,6 +489,9 @@
                     pullCalicoImages(pepperEnv, POOL)
                 }
 
+                // check and adjust Calico policy setting
+                checkCalicoPolicySetting(pepperEnv, ctl_node)
+
                 // this sequence implies workloads operations downtime
                 startCalicoUpgrade(pepperEnv, ctl_node)
                 performCalicoConfigurationUpdateAndServicesRestart(pepperEnv, POOL)
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 7b79f4c..443d56b 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -12,59 +12,42 @@
  *
  */
 
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def commandKwargs
-def probe = 1
-def errorOccured = false
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
+command = 'cmd.run'
+pepperEnv = "pepperEnv"
+errorOccured = false
 
 def upgrade(master, target, service, pckg, state) {
-    def common = new com.mirantis.mk.Common()
-    def salt = new com.mirantis.mk.Salt()
-    def command = 'cmd.run'
-    stage("Change ${target} repos") {
+    stage("Upgrade ${service}") {
         salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true, 5)
         salt.enforceState(master, "${target}", 'linux.system.repo', true)
-    }
-    stage("Update ${pckg} package") {
         common.infoMsg("Upgrade ${service} package")
         try {
             salt.runSaltProcessStep(master, "${target}", command, ["apt-get install --only-upgrade ${pckg}"], null, true)
         } catch (Exception er) {
             errorOccured = true
-            common.errorMsg("${pckg} package is not upgraded.")
+            common.errorMsg("[ERROR] ${pckg} package was not upgraded.")
             return
         }
-    }
-    stage("Run ${state} state on ${target} nodes") {
+        common.infoMsg("Run ${state} state on ${target} nodes")
         try {
             salt.enforceState(master, "${target}", ["${state}"], true)
         } catch (Exception er) {
             errorOccured = true
-            common.errorMsg("${state} state was executed and failed. Please fix it manually.")
+            common.errorMsg("[ERROR] ${state} state was executed and failed. Please fix it manually.")
         }
+        common.infoMsg("Check ${service} service status on the target nodes")
+        salt.runSaltProcessStep(master, "${target}", "service.status", ["${service}"], null, true)
+        return
     }
-    out = salt.runSaltCommand(master, 'local', ['expression': "${target}", 'type': 'compound'], command, null, "systemctl status ${service}.service", null)
-    salt.printSaltCommandResult(out)
-
-    common.warningMsg("Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.")
-    return
 }
 
 def upgrade_es_kibana(master) {
-    def common = new com.mirantis.mk.Common()
-    def salt = new com.mirantis.mk.Salt()
-    def command = 'cmd.run'
-    stage('Elasticsearch upgrade') {
+    stage('Upgrade elasticsearch') {
         try {
+            common.infoMsg('Upgrade the Elasticsearch package')
             salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
             salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["apt-get --only-upgrade install elasticsearch"], null, true)
             salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl daemon-reload"], null, true)
@@ -72,11 +55,10 @@
             salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
         } catch (Exception er) {
             errorOccured = true
-            common.errorMsg("Elasticsearch upgrade failed. Please fix it manually.")
+            common.errorMsg("[ERROR] Elasticsearch upgrade failed. Please fix it manually.")
             return
         }
-    }
-    stage('Verify that the Elasticsearch cluster status is green') {
+        common.infoMsg('Verify that the Elasticsearch cluster status is green')
         try {
             def retries_wait = 20
             def retries = 15
@@ -102,24 +84,24 @@
             }
         } catch (Exception er) {
             errorOccured = true
-            common.errorMsg("Elasticsearch cluster status is not \'green\'. Please fix it manually.")
+            common.errorMsg("[ERROR] Elasticsearch cluster status is not \'green\'. Please fix it manually.")
             return
         }
     }
-    stage('Kibana upgrade') {
+    stage('Upgrade kibana') {
         try {
+            common.infoMsg('Upgrade the Kibana package')
             salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl stop kibana"], null, true)
             salt.runSaltProcessStep(master, 'I@kibana:server', command, ["apt-get --only-upgrade install kibana"], null, true)
             salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl start kibana"], null, true)
         } catch (Exception er) {
             errorOccured = true
-            common.errorMsg("Kibana upgrade failed. Please fix it manually.")
+            common.errorMsg("[ERROR] Kibana upgrade failed. Please fix it manually.")
             return
         }
-        out = salt.runSaltCommand(master, 'local', ['expression': 'I@kibana:server', 'type': 'compound'], command, null, 'systemctl status kibana.service', null)
-        salt.printSaltCommandResult(out)
 
-        common.warningMsg('Please check if kibana service is running.')
+        common.infoMsg("Check kibana status on the target nodes")
+        salt.runSaltProcessStep(master, "I@kibana:server", "service.status", ["kibana"], null, true)
         return
     }
 }
@@ -132,7 +114,7 @@
 
         if (STAGE_UPGRADE_SYSTEM_PART.toBoolean() == true && !errorOccured) {
             upgrade(pepperEnv, "I@telegraf:agent or I@telegraf:remote_agent", "telegraf", "telegraf", "telegraf")
-            upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent", "fluentd")
+            upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent td-agent-additional-plugins", "fluentd")
             if (salt.testTarget(pepperEnv, "I@prometheus:relay")) {
                 upgrade(pepperEnv, "I@prometheus:relay", "prometheus-relay", "prometheus-relay", "prometheus")
             }
@@ -150,18 +132,23 @@
 
         if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
 
-            stage('Docker components upgrade') {
+            stage('Upgrade docker components') {
 
                 try {
-                    salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm monitoring"], null, true)
+                    common.infoMsg('Disable and remove the previous versions of monitoring services')
+                    salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm monitoring"], null, true)
+                    common.infoMsg('Rebuild the Prometheus configuration')
                     salt.enforceState(pepperEnv, 'I@docker:swarm and I@prometheus:server', 'prometheus')
-                    salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm dashboard"], null, true)
+                    common.infoMsg('Disable and remove the previous version of Grafana')
+                    salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm dashboard"], null, true)
+                    common.infoMsg('Start the monitoring services')
                     salt.enforceState(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'docker')
                     salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+                    common.infoMsg('Refresh the Grafana dashboards')
                     salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
                 } catch (Exception er) {
                     errorOccured = true
-                    common.errorMsg("Upgrade of docker components failed. Please fix it manually.")
+                    common.errorMsg("[ERROR] Upgrade of docker components failed. Please fix it manually.")
                     return
                 }
             }
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
index b886467..7134cfb 100644
--- a/test-openscap-pipeline.groovy
+++ b/test-openscap-pipeline.groovy
@@ -15,6 +15,7 @@
  *                              For OVAL definitions, paths to OVAL definition files separated by semicolon, profile is ignored.
  *  XCCDF_VERSION               The XCCDF version (default 1.2)
  *  XCCDF_TAILORING_ID          The tailoring id (default None)
+ *  XCCDF_CPE                   CPE dictionary or language for applicability checks (default None)
  *
  *  TARGET_SERVERS              The target Salt nodes (default *)
  *
@@ -149,6 +150,7 @@
     def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
     def xccdfVersion = XCCDF_VERSION ?: '1.2'
     def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+    def xccdfCPE = XCCDF_CPE ?: ''
     def targetServers = TARGET_SERVERS ?: '*'
 
     // To have an ability to work in heavy concurrency conditions
@@ -203,7 +205,7 @@
             salt.runSaltProcessStep(pepperEnv, targetServers, 'oscap.eval', [
                 benchmarkType, benchmarkFile, "results_dir=${resultsDir}",
                 "profile=${profileName}", "xccdf_version=${xccdfVersion}",
-                "tailoring_id=${xccdfTailoringId}"
+                "tailoring_id=${xccdfTailoringId}", "cpe=${xccdfCPE}"
             ])
 
             salt.cmdRun(pepperEnv, targetServers, "rm -f /tmp/${scanUUID}.tar.xz; tar -cJf /tmp/${scanUUID}.tar.xz -C ${resultsBaseDir} .")
diff --git a/test-salt-model-wrapper.groovy b/test-salt-model-wrapper.groovy
index 5039661..e8941ea 100644
--- a/test-salt-model-wrapper.groovy
+++ b/test-salt-model-wrapper.groovy
@@ -1,28 +1,7 @@
 /*
- Global wrapper for testing next projects:
+ Global CI wrapper for testing next projects:
    - salt-models/reclass-system
    - mk/cookiecutter-templates
-
- Can be triggered manually or by gerrit trigger:
- 1) gerrit trigger
-    Automatically switches if GERRIT_PROJECT variable detected
-    Always test GERRIT_REFSPEC VS GERRIT_BRANCH-master version of opposite project
-
- 2) manual run via job-build , possible to pass refspecs
-    - for CC
-    - Reclass
-
-    Example of TEST_PARAMETERS_YAML manual config:
----
-RECLASS_SYSTEM_URL: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system
-RECLASS_SYSTEM_GIT_REF: 2018.11.0
-RECLASS_SYSTEM_BRANCH: refs/heads/2018.11.0
-COOKIECUTTER_TEMPLATE_URL: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates
-COOKIECUTTER_TEMPLATE_REF: refs/heads/2018.11.0
-COOKIECUTTER_TEMPLATE_BRANCH: 2018.11.0
-DISTRIB_REVISION: 2018.11.0
-TEST_MODELS: ''
-
  */
 
 import groovy.json.JsonOutput
@@ -41,87 +20,58 @@
 ]
 
 baseGerritConfig = [:]
+jobResultComments = [:]
+commentLock = false
 
-LinkedHashMap getManualRefParams(LinkedHashMap map) {
-    LinkedHashMap manualParams = [:]
-    String defaultGitRef = 'HEAD'
-    if (map.containsKey('RECLASS_SYSTEM_GIT_REF') && map.containsKey('RECLASS_SYSTEM_URL')) {
-        manualParams[reclassSystemRepo] = [
-            'url': map.get('RECLASS_SYSTEM_URL'),
-            'ref': map.get('RECLASS_SYSTEM_GIT_REF'),
-            'branch': map.get('RECLASS_SYSTEM_BRANCH', 'master'),
-        ]
-    }
-    if (map.containsKey('COOKIECUTTER_TEMPLATE_REF') && map.containsKey('COOKIECUTTER_TEMPLATE_URL')) {
-        manualParams[cookiecutterTemplatesRepo] = [
-            'url': map.get('COOKIECUTTER_TEMPLATE_URL'),
-            'ref': map.get('COOKIECUTTER_TEMPLATE_REF'),
-            'branch': map.get('COOKIECUTTER_TEMPLATE_BRANCH', 'master'),
-        ]
-    }
-    return manualParams
-}
-
-def setGerritReviewComment(String jobName, String jobBuildURL, String jobStatus) {
+// post Gerrit review comment to patch
+def setGerritReviewComment() {
     if (baseGerritConfig) {
-        String skipped = voteMatrix.get(jobName, 'true') ? '' : '(skipped)'
+        while(commentLock) {
+            sleep 5
+        }
+        commentLock = true
         LinkedHashMap config = baseGerritConfig.clone()
-        config['message'] = "- ${jobName} ${jobBuildURL}console : ${jobStatus} ${skipped}".trim()
+        String jobResultComment = ''
+        jobResultComments.each { job, info ->
+            String skipped = voteMatrix.get(job, 'true') ? '' : '(non-voting)'
+            jobResultComment += "- ${job} ${info.url}console : ${info.status} ${skipped}".trim() + '\n'
+        }
+        config['message'] = sh(script: "echo '${jobResultComment}'", returnStdout: true).trim()
         gerrit.postGerritComment(config)
+        commentLock = false
     }
 }
 
-def runTests(String jobName, String extraVars) {
+// get job parameters for YAML-based job parametrization
+def yamlJobParameters(LinkedHashMap jobParams) {
+    return [
+        [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: JsonOutput.toJson(jobParams) ]
+    ]
+}
+
+// run needed job with params
+def runTests(String jobName, ArrayList jobParams) {
     def propagateStatus = voteMatrix.get(jobName, true)
     return {
-        def jobBuild = build job: "${jobName}", propagate: false, parameters: [
-            [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVars ]
-        ]
-        setGerritReviewComment(jobName, jobBuild.absoluteUrl, jobBuild.result)
+        def jobBuild = build job: jobName, propagate: false, parameters: jobParams
+        jobResultComments[jobName] = [ 'url': jobBuild.absoluteUrl, 'status': jobBuild.result ]
+        setGerritReviewComment()
         if (propagateStatus && jobBuild.result == 'FAILURE') {
             throw new Exception("Build ${jobName} is failed!")
         }
     }
 }
 
-def runTestSaltModelReclass(String cluster, String defaultGitUrl, String clusterGitUrl, String refSpec) {
-    def saltModelJob = "test-salt-model-${cluster}"
-    def propagateStatus = voteMatrix.get(saltModelJob, true)
-    return {
-        def jobBuild = build job: saltModelJob, propagate: false, parameters: [
-            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
-            [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
-            [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
-            [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: refSpec ],
-        ]
-        setGerritReviewComment(saltModelJob, jobBuild.absoluteUrl, jobBuild.result)
-        if (propagateStatus && jobBuild.result == 'FAILURE') {
-            throw new Exception("Build ${saltModelJob} is failed!")
-        }
-    }
-}
-
-def checkReclassSystemDocumentationCommit(gerritCredentials) {
-    gerrit.gerritPatchsetCheckout([
-        credentialsId: gerritCredentials
-    ])
-
-    sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
-
-    return sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
-
-}
-
 timeout(time: 12, unit: 'HOURS') {
     node(slaveNode) {
         def common = new com.mirantis.mk.Common()
         def git = new com.mirantis.mk.Git()
         def python = new com.mirantis.mk.Python()
 
-        // Var TEST_PARAMETERS_YAML contains any additional parameters for tests,
+        // Var EXTRA_VARIABLES_YAML contains any additional parameters for tests,
         // like manually specified Gerrit Refs/URLs, additional parameters and so on
         def buildTestParams = [:]
-        def buildTestParamsYaml = env.getProperty('TEST_PARAMETERS_YAML')
+        def buildTestParamsYaml = env.getProperty('EXTRA_VARIABLES_YAML')
         if (buildTestParamsYaml) {
             common.mergeEnv(env, buildTestParamsYaml)
             buildTestParams = readYaml text: buildTestParamsYaml
@@ -132,100 +82,114 @@
 
         // Gerrit parameters
         String gerritCredentials = job_env.get('CREDENTIALS_ID', 'gerrit')
-        String gerritRef = job_env.get('GERRIT_REFSPEC', '')
-        String gerritProject = ''
-        String gerritName = ''
-        String gerritScheme = ''
-        String gerritHost = ''
-        String gerritPort = ''
-        String gerritChangeNumber = ''
+        String gerritRef = job_env.get('GERRIT_REFSPEC')
+        String gerritProject = job_env.get('GERRIT_PROJECT')
+        String gerritName = job_env.get('GERRIT_NAME')
+        String gerritScheme = job_env.get('GERRIT_SCHEME')
+        String gerritHost = job_env.get('GERRIT_HOST')
+        String gerritPort = job_env.get('GERRIT_PORT')
+        String gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
+        String gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
+        String gerritBranch = job_env.get('GERRIT_BRANCH')
+        Boolean gateMode = job_env.get('GERRIT_CI_MERGE_TRIGGER', false).toBoolean()
 
         // Common and manual build parameters
         LinkedHashMap projectsMap = [:]
-        String distribRevision = job_env.get('DISTRIB_REVISION', 'nightly')
+        String distribRevision = 'nightly'
+        //checking if the branch is from release
+        if (gerritBranch.startsWith('release')) {
+            def distribRevisionRelease = gerritBranch.tokenize('/')[-1]
+            if (!common.checkRemoteBinary([apt_mk_version: distribRevisionRelease]).linux_system_repo_url) {
+              common.infoMsg("Binary release ${distribRevisionRelease} does not exist on http://mirror.mirantis.com. Falling back to 'proposed'.")
+              distribRevisionRelease = 'proposed'
+            }
+            distribRevision = distribRevisionRelease
+        }
         ArrayList testModels = job_env.get('TEST_MODELS', 'mcp-virtual-lab,infra').split(',')
 
-        stage('Check build mode') {
-            def buildType = ''
-            if (gerritRef) {
-                // job is triggered by Gerrit, get all required Gerrit vars
-                gerritProject = job_env.get('GERRIT_PROJECT')
-                gerritName = job_env.get('GERRIT_NAME')
-                gerritScheme = job_env.get('GERRIT_SCHEME')
-                gerritHost = job_env.get('GERRIT_HOST')
-                gerritPort = job_env.get('GERRIT_PORT')
-                gerritChangeNumber = job_env.get('GERRIT_CHANGE_NUMBER')
-                gerritPatchSetNumber = job_env.get('GERRIT_PATCHSET_NUMBER')
-                gerritBranch = job_env.get('GERRIT_BRANCH')
-
-                // check if change aren't already merged
-                def gerritChange = gerrit.getGerritChange(gerritName, gerritHost, gerritChangeNumber, gerritCredentials)
-                if (gerritChange.status == "MERGED") {
-                    common.successMsg('Patch set is alredy merged, no need to test it')
-                    currentBuild.result = 'SUCCESS'
-                    return
-                }
-
-                projectsMap[gerritProject] = [
-                    'url': "${gerritScheme}://${gerritName}@${gerritHost}:${gerritPort}/${gerritProject}",
-                    'ref': gerritRef,
-                    'branch': gerritBranch,
-                ]
-                buildType = 'Gerrit Trigger'
-                buildTestParams << job_env.findAll { k,v -> k ==~ /GERRIT_.+/ }
-                baseGerritConfig = [
-                    'gerritName': gerritName,
-                    'gerritHost': gerritHost,
-                    'gerritChangeNumber': gerritChangeNumber,
-                    'credentialsId': gerritCredentials,
-                    'gerritPatchSetNumber': gerritPatchSetNumber,
-                ]
-            } else {
-                projectsMap = getManualRefParams(job_env)
-                if (!projectsMap) {
-                    error('Manual build detected and no valid Git refs provided!')
-                }
-                buildType = 'Manual build'
+        stage('Gerrit prepare') {
+            // check if the change isn't already merged
+            def gerritChange = gerrit.getGerritChange(gerritName, gerritHost, gerritChangeNumber, gerritCredentials)
+            if (gerritChange.status == "MERGED") {
+                common.successMsg('Patch set is already merged, no need to test it')
+                currentBuild.result = 'SUCCESS'
+                return
             }
-            ArrayList descriptionMsgs = [ "<font color='red'>${buildType} detected!</font> Running with next parameters:" ]
+            def defaultURL =  "${gerritScheme}://${gerritName}@${gerritHost}:${gerritPort}"
+            projectsMap[gerritProject] = [
+                'url': "${defaultURL}/${gerritProject}",
+                'ref': gerritRef,
+                'branch': gerritBranch,
+            ]
+            buildType = 'Gerrit Trigger'
+            buildTestParams << job_env.findAll { k,v -> k ==~ /GERRIT_.+/ }
+            baseGerritConfig = [
+                'gerritName': gerritName,
+                'gerritHost': gerritHost,
+                'gerritChangeNumber': gerritChangeNumber,
+                'credentialsId': gerritCredentials,
+                'gerritPatchSetNumber': gerritPatchSetNumber,
+            ]
+            ArrayList descriptionMsgs = [ "Running with next parameters:" ]
             for(String project in projectsMap.keySet()) {
                 descriptionMsgs.add("Ref for ${project} => ${projectsMap[project]['ref']}")
                 descriptionMsgs.add("Branch for ${project} => ${projectsMap[project]['branch']}")
             }
             descriptionMsgs.add("Distrib revision => ${distribRevision}")
             currentBuild.description = descriptionMsgs.join('<br/>')
+
+            gerrit.gerritPatchsetCheckout([
+                credentialsId: gerritCredentials
+            ])
         }
 
         stage("Run tests") {
-            def branches = [:]
+            def documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
+            if (documentationOnly) {
+                common.infoMsg("Tests skipped, documentation only changed!")
+                currentBuild.result = 'SUCCESS'
+                return
+            }
 
-            if (projectsMap.containsKey(reclassSystemRepo)) {
-                def documentationOnly = checkReclassSystemDocumentationCommit(gerritCredentials)
-                if (['master'].contains(gerritBranch) && !documentationOnly) {
-                    for (int i = 0; i < testModels.size(); i++) {
-                        def cluster = testModels[i]
-                        def clusterGitUrl = projectsMap[reclassSystemRepo]['url'].substring(0, projectsMap[reclassSystemRepo]['url'].lastIndexOf("/") + 1) + cluster
-                        branches["reclass-system-${cluster}"] = runTestSaltModelReclass(cluster, projectsMap[reclassSystemRepo]['url'], clusterGitUrl, projectsMap[reclassSystemRepo]['ref'])
-                    }
-                } else {
-                    common.warningMsg("Tests for ${testModels} skipped!")
+            def branches = [:]
+            String branchJobName = ''
+
+            if (gerritProject == reclassSystemRepo && gerritBranch == 'master') {
+                sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD  | grep .yml | xargs -I {}  python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
+                for (int i = 0; i < testModels.size(); i++) {
+                    def cluster = testModels[i]
+                    def clusterGitUrl = projectsMap[reclassSystemRepo]['url'].substring(0, projectsMap[reclassSystemRepo]['url'].lastIndexOf("/") + 1) + cluster
+                    branchJobName = "test-salt-model-${cluster}"
+                    def jobParams = [
+                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+                        [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+                        [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: projectsMap[reclassSystemRepo]['url']],
+                        [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: projectsMap[reclassSystemRepo]['ref'] ],
+                    ]
+                    branches[branchJobName] = runTests(branchJobName, jobParams)
                 }
             }
-            if (projectsMap.containsKey(reclassSystemRepo) || projectsMap.containsKey(cookiecutterTemplatesRepo)) {
-                branches['cookiecutter-templates'] = runTests('test-mk-cookiecutter-templates', JsonOutput.toJson(buildTestParams))
-            }
-            if (projectsMap.containsKey(cookiecutterTemplatesRepo)) {
-                branches['test-drivetrain'] = runTests('test-drivetrain', JsonOutput.toJson(buildTestParams))
-                // TODO: enable oscore-test job once it's ready to consume EXTRA_VARIABLES_YAML
-                //branches['oscore-test-cookiecutter-models'] = runTests('oscore-test-cookiecutter-models', JsonOutput.toJson(buildTestParams))
+            if (gerritProject == reclassSystemRepo || gerritProject == cookiecutterTemplatesRepo) {
+                branchJobName = 'test-mk-cookiecutter-templates'
+                branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
             }
 
-            try {
-                parallel branches
-            } catch (Exception e) {
-                println e
-                println 'Job is in non-voting mode for now. Skipping fails.'
+            if (!gateMode) {
+                if (gerritProject == cookiecutterTemplatesRepo) {
+                    branchJobName = 'test-drivetrain'
+                    branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
+                    branchJobName = 'oscore-test-cookiecutter-models'
+                    branches[branchJobName] = runTests(branchJobName, yamlJobParameters(buildTestParams))
+                }
             }
+
+            branches.keySet().each { key ->
+                if (branches[key] instanceof Closure) {
+                    jobResultComments[key] = [ 'url': job_env.get('BUILD_URL'), 'status': 'WAITING' ]
+                }
+            }
+            setGerritReviewComment()
+            parallel branches
         }
     }
 }