Merge "Add ability to generate relative URLs to charts"
diff --git a/src/com/mirantis/mcp/Git.groovy b/src/com/mirantis/mcp/Git.groovy
index 92359a0..4231016 100644
--- a/src/com/mirantis/mcp/Git.groovy
+++ b/src/com/mirantis/mcp/Git.groovy
@@ -79,6 +79,9 @@
   def credentialsId = config.get('credentialsId', '')
   def protocol = config.get('protocol', 'ssh')
   def refspec = config.get('refspec', null)
+  String branch = config.get('branch', 'FETCH_HEAD')
+  Integer depth = config.get('depth', 0)
+  Integer timeout = config.get('timeout', 0)
 
   // default parameters
   def scmExtensions = [
@@ -88,7 +91,7 @@
 
   // https://issues.jenkins-ci.org/browse/JENKINS-6856
   if (merge) {
-    scmExtensions.add([$class: 'LocalBranch', localBranch: "${config.branch}"])
+    scmExtensions.add([$class: 'LocalBranch', localBranch: "${branch}"])
   }
 
   // we need wipe workspace before checkout
@@ -96,10 +99,20 @@
     scmExtensions.add([$class: 'WipeWorkspace'])
   }
 
+  // optionally limit depth of checkout
+  if (depth) {
+    scmExtensions.add([$class: 'CloneOption', depth: "${depth}", shallow: 'true'])
+  }
+
+  // optionally set timeout
+  if (timeout) {
+    scmExtensions.add([$class: 'CloneOption', timeout: "${timeout}"])
+  }
+
   checkout(
     scm: [
       $class: 'GitSCM',
-      branches: [[name: "${config.branch}"]],
+      branches: [[name: "${branch}"]],
       extensions: scmExtensions,
       userRemoteConfigs: [[
         credentialsId: credentialsId,
diff --git a/src/com/mirantis/mcp/MCPArtifactory.groovy b/src/com/mirantis/mcp/MCPArtifactory.groovy
index 272b1bc..83b1975 100644
--- a/src/com/mirantis/mcp/MCPArtifactory.groovy
+++ b/src/com/mirantis/mcp/MCPArtifactory.groovy
@@ -139,14 +139,8 @@
  */
 def moveItem (String artifactoryURL, String sourcePath, String dstPath, boolean copy = false, boolean dryRun = false) {
     def url = "${artifactoryURL}/api/${copy ? 'copy' : 'move'}/${sourcePath}?to=/${dstPath}&dry=${dryRun ? '1' : '0'}"
-    withCredentials([
-            [$class          : 'UsernamePasswordMultiBinding',
-             credentialsId   : 'artifactory',
-             passwordVariable: 'ARTIFACTORY_PASSWORD',
-             usernameVariable: 'ARTIFACTORY_LOGIN']
-    ]) {
-        sh "bash -c \"curl -X POST -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
-    }
+    def http = new com.mirantis.mk.Http()
+    return http.doPost(url, 'artifactory')
 }
 
 /**
@@ -157,14 +151,8 @@
  */
 def deleteItem (String artifactoryURL, String itemPath) {
     def url = "${artifactoryURL}/${itemPath}"
-    withCredentials([
-            [$class          : 'UsernamePasswordMultiBinding',
-             credentialsId   : 'artifactory',
-             passwordVariable: 'ARTIFACTORY_PASSWORD',
-             usernameVariable: 'ARTIFACTORY_LOGIN']
-    ]) {
-        sh "bash -c \"curl -X DELETE -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
-    }
+    def http = new com.mirantis.mk.Http()
+    return http.doDelete(url, 'artifactory')
 }
 
 /**
@@ -284,6 +272,48 @@
 }
 
 /**
+ * Convert Mirantis docker image url/path to Mirantis artifactory path ready for use in API calls
+ *
+ * For example:
+ * 'docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master' -> 'docker-dev-kaas-local/mirantis/kaas/si-test/master'
+ *
+ * @param image  String, docker image url/path including a tag
+ * @return       String, artifactory path in form 'repo/namespace/image/tag'
+ */
+def dockerImageToArtifactoryPath(String image) {
+    List imageParts = image.tokenize('/')
+    String repoName = imageParts[0].tokenize('.')[0]
+    String namespace = imageParts[1..-2].join('/')
+    String imageName = imageParts[-1].tokenize(':')[0]
+    String imageTag = imageParts[-1].tokenize(':')[1]
+
+    return [repoName, namespace, imageName, imageTag].join('/')
+}
+
+/**
+ * Copy docker image from one url to another
+ *
+ * @param srcImage String, Mirantis URL/path for docker image to copy from
+ * @param dstImage String, Mirantis URL/path for docker image to copy to
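+ *
+ * Example (illustrative sketch; the destination repository name is an assumption):
+ *     copyDockerImage('docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master',
+ *                     'docker-prod-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master')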
+ */
+def copyDockerImage(String srcImage, String dstImage) {
+    def artifactoryServer = Artifactory.server(env.ARTIFACTORY_SERVER ?: 'mcp-ci')
+    String srcPath = dockerImageToArtifactoryPath(srcImage)
+    String dstPath = dockerImageToArtifactoryPath(dstImage)
+
+    return moveItem(artifactoryServer.getUrl(), srcPath, dstPath, true)
+}
+
+/**
+ * Delete docker image on Mirantis's artifactory
+ *
+ * @param image String, Mirantis URL/path for docker image to delete
+ */
+def deleteDockerImage(String image) {
+    def artifactoryServer = Artifactory.server(env.ARTIFACTORY_SERVER ?: 'mcp-ci')
+
+    return deleteItem(artifactoryServer.getUrl() + '/artifactory', dockerImageToArtifactoryPath(image))
+}
+
+/**
  * Upload docker image to Artifactory
  *
  * @param server ArtifactoryServer, the instance of Artifactory server
@@ -522,7 +552,7 @@
             }"""
 
         artifactoryServer.upload(uploadSpec, newBuildInfo())
-        def linkUrl = "${artifactoryServer.getUrl()}/artifactory/${config.get('artifactoryRepo')}"
+        def linkUrl = "${artifactoryServer.getUrl()}/${config.get('artifactoryRepo')}"
         artifactsDescription = "Job artifacts uploaded to Artifactory: <a href=\"${linkUrl}\">${linkUrl}</a>"
     } catch (Exception e) {
         if (e =~ /no artifacts/) {
diff --git a/src/com/mirantis/mk/Artifactory.groovy b/src/com/mirantis/mk/Artifactory.groovy
index 9ac53bc..d126a1b 100644
--- a/src/com/mirantis/mk/Artifactory.groovy
+++ b/src/com/mirantis/mk/Artifactory.groovy
@@ -346,7 +346,7 @@
       "files": [
         {
           "pattern": "${file}",
-          "target": "${art.outRepo}",
+          "target": "artifactory/${art.outRepo}",
           "props": "${props}"
         }
       ]
@@ -477,7 +477,7 @@
                 "files": [
                    {
                        "pattern": "${chartPattern}",
-                       "target": "${repoName}/"
+                       "target": "artifactory/${repoName}/"
                     }
                 ]
             }"""
diff --git a/src/com/mirantis/mk/Ceph.groovy b/src/com/mirantis/mk/Ceph.groovy
index 8660233..fe71d7d 100644
--- a/src/com/mirantis/mk/Ceph.groovy
+++ b/src/com/mirantis/mk/Ceph.groovy
@@ -1,103 +1,543 @@
 package com.mirantis.mk
 
 /**
+ * Install and configure ceph clients
  *
- * Ceph functions
- *
+ * @param master        Salt connection object
+ * @param extra_tgt     Extra targets for compound
  */
+def installClient(master, extra_tgt='') {
+    def salt = new Salt()
+
+    // install Ceph Radosgw
+    installRgw(master, "I@ceph:radosgw", extra_tgt)
+
+    // setup keyring for Openstack services
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@glance:server $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@cinder:controller $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@nova:compute $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@gnocchi:server $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+}
 
 /**
- * Ceph health check
+ * Install and configure ceph monitor on target
  *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
  */
-def waitForHealthy(master, target, flags=[], count=0, attempts=300) {
-    def common = new com.mirantis.mk.Common()
-    def salt = new com.mirantis.mk.Salt()
-    // wait for healthy cluster
-    while (count < attempts) {
-        def health = salt.cmdRun(master, target, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break
-        } else {
-            for (flag in flags) {
-                if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
-                    common.infoMsg('Cluster is healthy')
-                    return
+def installMon(master, target="I@ceph:mon", extra_tgt='') {
+    def salt = new Salt()
+
+    salt.enforceState([saltId: master, target: "$target $extra_tgt", state: 'salt.minion.grains'])
+
+    // TODO: can we re-add cmn01 with proper keyrings?
+    // generate keyrings
+    if(salt.testTarget(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt", state: 'ceph.mon'])
+        salt.runSaltProcessStep(master, "I@ceph:mon $extra_tgt", 'saltutil.sync_grains')
+        salt.runSaltProcessStep(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt", 'mine.update')
+
+        // on target nodes mine is used to get pillar from 'ceph:common:keyring:admin' via grain.items
+        // we need to refresh all pillar/grains to make data sharing work correctly
+        salt.fullRefresh(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt")
+
+        sleep(5)
+    }
+    // install Ceph Mons
+    salt.enforceState([saltId: master, target: "I@ceph:mon $extra_tgt", state: 'ceph.mon'])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:mgr $extra_tgt", state: 'ceph.mgr'])
+
+    // update config
+    salt.enforceState([saltId: master, target: "I@ceph:common $extra_tgt", state: 'ceph.common'])
+}
+
+/**
+ * Install and configure osd daemons on target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def installOsd(master, target="I@ceph:osd", setup=true, extra_tgt='') {
+    def salt = new Salt()
+    def orchestrate = new Orchestrate()
+
+    // install Ceph OSDs
+    salt.enforceState([saltId: master, target: target, state: ['linux.storage','ceph.osd']])
+    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'saltutil.sync_grains')
+    salt.enforceState([saltId: master, target: target, state: 'ceph.osd.custom'])
+    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'saltutil.sync_grains')
+    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'mine.update')
+
+    // setup pools, keyrings and maybe crush
+    if(salt.testTarget(master, "I@ceph:setup $extra_tgt") && setup) {
+        orchestrate.installBackup(master, 'ceph')
+        salt.enforceState([saltId: master, target: "I@ceph:setup $extra_tgt", state: 'ceph.setup'])
+    }
+}
+
+/**
+ * Install and configure rgw service on target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def installRgw(master, target="I@ceph:radosgw", extra_tgt='') {
+    def salt = new Salt()
+
+    if(salt.testTarget(master, "I@ceph:radosgw $extra_tgt")) {
+        salt.fullRefresh(master, "I@ceph:radosgw $extra_tgt")
+        salt.enforceState([saltId: master, target: "I@ceph:radosgw $extra_tgt", state: ['keepalived', 'haproxy', 'ceph.radosgw']])
+    }
+}
+
+/**
+ * Remove rgw daemons from target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def removeRgw(master, target, extra_tgt='') {
+    def salt = new Salt()
+
+    // TODO needs to be reviewed
+    salt.fullRefresh(master, "I@ceph:radosgw $extra_tgt")
+    salt.enforceState([saltId: master, target: "I@ceph:radosgw $extra_tgt", state: ['keepalived', 'haproxy', 'ceph.radosgw']])
+}
+
+/**
+ * Remove osd daemons from target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param osds          List of OSD ids to remove
+ * @param flags         Collection of flags which are expected to be set on the cluster (passed to waitForHealthy)
+ * @param safeRemove    Wait for data rebalance before removing the drive (optional, default true)
+ * @param wipeDisks     Wipe block and data partitions completely (optional, default false)
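+ *
+ * Example (illustrative sketch; the target and OSD ids are assumptions):
+ *     setFlags(master, ['noout'])
+ *     removeOsd(master, 'osd001*', ['5', '7'], ['noout'], true, false)
+ *     unsetFlags(master, ['noout'])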
+ */
+def removeOsd(master, target, osds, flags, safeRemove=true, wipeDisks=false) {
+    def common = new Common()
+    def salt = new Salt()
+
+    // systemctl stop ceph-osd@0 && ceph osd purge 0 --yes-i-really-mean-it && umount /dev/vdc1; test -b /dev/vdc1 && dd if=/dev/zero of=/dev/vdc1 bs=1M; test -b /dev/vdc2 && dd if=/dev/zero of=/dev/vdc2 bs=1M count=100; sgdisk -d1 -d2 /dev/vdc; partprobe
+    if(osds.isEmpty()) {
+        common.warningMsg('List of OSDs was empty. No OSD is removed from cluster')
+        return
+    }
+
+    // `ceph osd out <id> <id>`
+    cmdRun(master, 'ceph osd out ' + osds.join(' '), true, true)
+
+    if(safeRemove) {
+        waitForHealthy(master, flags)
+    }
+
+    for(osd in osds) {
+        salt.runSaltProcessStep(master, target, 'service.stop', "ceph-osd@$osd", null, true)
+        cmdRun(master, "ceph osd purge $osd --yes-i-really-mean-it", true, true)
+    }
+
+    for(osd in osds) {
+        def lvm_enabled = getPillar(master, target, "ceph:osd:lvm_enabled")
+        if(lvm_enabled) {
+            // ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --destroy
+            def output = cmdRunOnTarget(master, target, "ceph-volume lvm zap --osd-id $osd --destroy >/dev/null && echo 'zapped'", false)
+            if(output == 'zapped') { continue }
+        }
+
+        common.infoMsg("Removing legacy osd.")
+        def journal_partition = ""
+        def block_db_partition = ""
+        def block_wal_partition = ""
+        def block_partition = ""
+        def data_partition = ""
+        def dataDir = "/var/lib/ceph/osd/ceph-$osd"
+        journal_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/journal_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/journal_uuid`", false)
+        block_db_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/block.db_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block.db_uuid`", false)
+        block_wal_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/block.wal_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block.wal_uuid`", false)
+        block_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/block_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block_uuid`", false)
+        data_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/fsid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/fsid`", false)
+
+        try {
+            if(journal_partition.trim()) { removePartition(master, target, journal_partition) }
+            if(block_db_partition.trim()) { removePartition(master, target, block_db_partition) }
+            if(block_wal_partition.trim()) { removePartition(master, target, block_wal_partition) }
+            if(block_partition.trim()) { removePartition(master, target, block_partition, 'block', wipeDisks) }
+            if(data_partition.trim()) { removePartition(master, target, data_partition, 'data', wipeDisks) }
+            else { common.warningMsg("Can't find data partition for osd.$osd") }
+        }
+        catch(Exception e) {
+            // report but continue as problem on one osd could be sorted out after
+            common.errorMsg("Found some issue during cleaning partition for osd.$osd on $target")
+            common.errorMsg(e)
+            currentBuild.result = 'FAILURE'
+        }
+
+        cmdRunOnTarget(master, target, "partprobe", false)
+    }
+}
+
+/**
+ * Update monitoring for target hosts
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def updateMonitoring(master, target="I@ceph:common", extra_tgt='') {
+    def common = new Common()
+    def salt = new Salt()
+
+    def prometheusNodes = salt.getMinions(master, "I@prometheus:server $extra_tgt")
+    if(!prometheusNodes.isEmpty()) {
+        //Collect Grains
+        salt.enforceState([saltId: master, target: "$target $extra_tgt", state: 'salt.minion.grains'])
+        salt.runSaltProcessStep(master, "$target $extra_tgt", 'saltutil.refresh_modules')
+        salt.runSaltProcessStep(master, "$target $extra_tgt", 'mine.update')
+        sleep(5)
+        salt.enforceState([saltId: master, target: "$target $extra_tgt", state: ['fluentd', 'telegraf', 'prometheus']])
+        salt.enforceState([saltId: master, target: "I@prometheus:server $extra_tgt", state: 'prometheus'])
+    }
+    else {
+        common.infoMsg('No Prometheus nodes in cluster. Nothing to do.')
+    }
+}
+
+def connectCeph(master, extra_tgt='') {
+    new Common().infoMsg("This method was renamed. Use method connectOS instead.")
+    connectOS(master, extra_tgt)
+}
+
+/**
+ * Enforce configuration and connect OpenStack clients
+ *
+ * @param master        Salt connection object
+ * @param extra_tgt     Extra targets for compound
+ */
+def connectOS(master, extra_tgt='') {
+    def salt = new Salt()
+
+    // setup Keystone service and endpoints for swift or / and S3
+    salt.enforceStateWithTest([saltId: master, target: "I@keystone:client $extra_tgt", state: 'keystone.client'])
+
+    // connect Ceph to the env
+    if(salt.testTarget(master, "I@ceph:common and I@glance:server $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@glance:server $extra_tgt", state: ['glance']])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@glance:server $extra_tgt", 'service.restart', ['glance-api'])
+    }
+    if(salt.testTarget(master, "I@ceph:common and I@cinder:controller $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@cinder:controller $extra_tgt", state: ['cinder']])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@cinder:controller $extra_tgt", 'service.restart', ['cinder-volume'])
+    }
+    if(salt.testTarget(master, "I@ceph:common and I@nova:compute $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute $extra_tgt", state: ['nova']])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute $extra_tgt", 'service.restart', ['nova-compute'])
+    }
+    if(salt.testTarget(master, "I@ceph:common and I@gnocchi:server $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server:role:primary $extra_tgt", state: 'gnocchi.server'])
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server $extra_tgt", state: 'gnocchi.server'])
+    }
+}
+
+/**
+ * Remove vm from VCP
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ */
+def removeVm(master, target) {
+    def common = new Common()
+    def salt = new Salt()
+
+    def fqdn = getGrain(master, target, 'id')
+    def hostname = salt.stripDomainName(fqdn)
+    def hypervisor = getPillar(master, "I@salt:control", "salt:control:cluster:internal:node:$hostname:provider")
+
+    removeSalt(master, target)
+
+    if(hypervisor?.trim()) {
+        cmdRunOnTarget(master, hypervisor, "virsh destroy $fqdn")
+        cmdRunOnTarget(master, hypervisor, "virsh undefine $fqdn")
+    }
+    else {
+        common.errorMsg("There is no provider in pillar for $hostname")
+    }
+}
+
+/**
+ * Stop target salt minion, remove its key on master and definition in reclass
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ */
+def removeSalt(master, target) {
+    def common = new Common()
+
+    def fqdn = getGrain(master, target, 'id')
+    try {
+        cmdRunOnTarget(master, 'I@salt:master', "salt-key --include-accepted -r $fqdn -y")
+    }
+    catch(Exception e) {
+        common.warningMsg(e)
+    }
+}
+
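+/**
+ * Delete all ceph auth keyrings which mention the target's host name
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound (currently unused here)
+ */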
+def deleteKeyrings(master, target, extra_tgt='') {
+    def host = getGrain(master, target, 'host')
+    def keys = cmdRun(master, "ceph auth list | grep $host", false).tokenize('\n')
+    if(keys.isEmpty()) {
+        new Common().warningMsg("Nothing to do. There is no keyring for $host")
+    }
+    for(key in keys) {
+        cmdRun(master, "ceph auth del $key")
+    }
+}
+
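+/**
+ * Generate `ceph osd pg-upmap-items` commands which map newly assigned OSDs
+ * back to the OSDs they replace, based on the difference between the 'up' and 'acting' sets
+ *
+ * @param pgmap         List of PG entries, each providing 'pgid', 'up' and 'acting' fields
+ * @param map           List to which the generated commands are appended
+ */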
+def generateMapping(pgmap,map) {
+    def pg_new
+    def pg_old
+    for(pg in pgmap) {
+        pg_new = pg["up"].minus(pg["acting"])
+        pg_old = pg["acting"].minus(pg["up"])
+        for(int i = 0; i < pg_new.size(); i++) {
+            // def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+            def string = "ceph osd pg-upmap-items ${pg["pgid"]} ${pg_new[i]} ${pg_old[i]}"
+            map.add(string)
+        }
+    }
+}
+
+/**
+ * Run command on the first available ceph monitor
+ *
+ * @param master        Salt connection object
+ * @param cmd           Command to run
+ * @param checkResponse Check response of command. (optional, default true)
+ * @param output        Print output (optional, default false)
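+ *
+ * Example (illustrative):
+ *     def health = cmdRun(master, 'ceph health', false)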
+ */
+def cmdRun(master, cmd, checkResponse=true, output=false) {
+    def salt = new Salt()
+    def cmn01 = salt.getFirstMinion(master, "I@ceph:mon")
+    return salt.cmdRun(master, cmn01, cmd, checkResponse, null, output)['return'][0][cmn01]
+}
+
+/**
+ * Run command on target host
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param cmd           Command to run
+ * @param checkResponse Check response of command. (optional, default true)
+ * @param output        Print output (optional, default false)
+ */
+def cmdRunOnTarget(master, target, cmd, checkResponse=true, output=false) {
+    def salt = new Salt()
+    return salt.cmdRun(master, target, cmd, checkResponse, null, output)['return'][0].values()[0]
+}
+
+/**
+ * Refresh pillars and get the requested one for the first host
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param pillar        Pillar to obtain
+ */
+def getPillar(master, target, pillar) {
+    def common = new Common()
+    def salt = new Salt()
+    try {
+        return salt.getPillar(master, target, pillar)['return'][0].values()[0]
+    }
+    catch(Exception e) {
+        common.warningMsg('There was no pillar for the target.')
+    }
+}
+
+/**
+ * Refresh grains and get the requested one for the first host
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param grain         Grain to obtain
+ */
+def getGrain(master, target, grain) {
+    def common = new Common()
+    def salt = new Salt()
+    try {
+        return salt.getGrain(master, target, grain)['return'][0].values()[0].values()[0]
+    }
+    catch(Exception e) {
+        common.warningMsg('There was no grain for the target.')
+    }
+}
+
+/**
+ * Set flags
+ *
+ * @param master        Salt connection object
+ * @param flags         Collection of flags to set
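+ *
+ * Example (illustrative maintenance sketch):
+ *     setFlags(master, ['noout', 'norebalance'])
+ *     // ... perform maintenance ...
+ *     waitForHealthy(master, ['noout', 'norebalance'])
+ *     unsetFlags(master, ['noout', 'norebalance'])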
+ */
+def setFlags(master, flags) {
+    if(flags instanceof String) { flags = [flags] }
+    for(flag in flags) {
+        cmdRun(master, 'ceph osd set ' + flag)
+    }
+}
+
+/**
+ * Unset flags
+ *
+ * @param master        Salt connection object
+ * @param flags         Collection of flags to unset (optional)
+ */
+def unsetFlags(master, flags=[]) {
+    if(flags instanceof String) { flags = [flags] }
+    for(flag in flags) {
+        cmdRun(master, 'ceph osd unset ' + flag)
+    }
+}
+
+/**
+ * Wait for healthy cluster while ignoring flags which have been set
+ *
+ * @param master        Salt connection object
+ * @param flags         Collection of flags which are expected to be set and should not be treated as unhealthy
+ * @param attempts      Number of attempts before execution is paused for manual confirmation (optional, default 300)
+ */
+def waitForHealthy(master, flags, attempts=300) {
+    def common = new Common()
+
+    def count = 0
+    def isHealthy = false
+    def health = ''
+
+    // wait for current ops will be reflected in status
+    sleep(5)
+
+    while(count++ < attempts) {
+        health = cmdRun(master, 'ceph health', false)
+        if(health == 'HEALTH_OK') { return }
+        else {
+            // HEALTH_WARN noout,norebalance flag(s) set
+            def unexpectedFlags = health.tokenize(' ').getAt(1)?.tokenize(',')
+            unexpectedFlags.removeAll(flags)
+            if(health.contains('HEALTH_WARN') && unexpectedFlags.isEmpty()) { return }
+        }
+        common.warningMsg("Ceph cluster is still unhealthy: $health")
+        sleep(10)
+    }
+    // TODO: MissingMethodException
+    input message: "After ${count} attempts cluster is still unhealthy."
+    //throw new RuntimeException("After ${count} attempts cluster is still unhealthy. Can't proceed")
+}
+def waitForHealthy(master, String host, flags, attempts=300) {
+    new Common().warningMsg('This method will be deprecated.')
+    waitForHealthy(master, flags, attempts)
+}
+
+/**
+ * Remove unused orphan partitions left behind by removed OSDs
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param wipePartitions     Wipe each found partition completely (optional, default false)
+ */
+def removeOrphans(master, target, wipePartitions=false) {
+    def common = new Common()
+    def salt = new Salt()
+
+    def orphans = []
+    // TODO: ceph-disk is available only in luminous
+    def disks = cmdRunOnTarget(master, target, "ceph-disk list --format json 2>/dev/null",false)
+    disks = "{\"disks\":$disks}" // common.parseJSON() can't parse a list of maps
+    disks = common.parseJSON(disks)['disks']
+    for(disk in disks) {
+        for(partition in disk.get('partitions')) {
+            def orphan = false
+            if(partition.get('type') == 'block.db' && !partition.containsKey('block.db_for')) { orphan = true }
+            else if(partition.get('type') == 'block' && !partition.containsKey('block_for')) { orphan = true }
+            else if(partition.get('type') == 'data' && partition.get('state') != 'active') { orphan = true }
+            // TODO: test for the rest of types
+
+            if(orphan) {
+                if(partition.get('path')) {
+                    removePartition(master, target, partition['path'], partition['type'], wipePartitions)
+                }
+                else {
+                    common.warningMsg("Found orphan partition on $target but failed to remove it.")
                 }
             }
         }
-        common.infoMsg("Ceph health status: ${health}")
-        count++
-        sleep(10)
     }
+    cmdRunOnTarget(master, target, "partprobe", false)
 }
 
 /**
  * Ceph remove partition
  *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param partition     Partition to remove on target host
+ * @param type          Type of partition. Some partitions need additional steps (optional, default empty string)
+ * @param fullWipe      Fill the entire partition with zeros (optional, default false)
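+ *
+ * Example (illustrative sketch; the target and device path are assumptions):
+ *     removePartition(master, 'osd001*', '/dev/vdc1', 'data', false)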
  */
-def removePartition(master, target, partition_uuid, type='', id=-1) {
-    def salt = new com.mirantis.mk.Salt()
-    def common = new com.mirantis.mk.Common()
-    def partition = ''
+def removePartition(master, target, partition, type='', fullWipe=false) {
+    def common = new Common()
+    def salt = new Salt()
+
     def dev = ''
     def part_id = ''
-    def lvm_enabled = salt.getPillar(master, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true)
-    if ( !lvm_enabled ){
-        if (type == 'lockbox') {
-            try {
-                // umount - partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
-                salt.cmdRun(master, target, "umount ${partition}")
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-        } else if (type == 'data') {
-            try {
-                // umount - partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
-                salt.cmdRun(master, target, "umount ${partition}")
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                // partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-        } else {
-            try {
-                // partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-        }
-        if (partition?.trim()) {
-            if (partition.contains("nvme")) {
-                // partition = /dev/nvme1n1p2
-                // dev = /dev/nvme1n1
-                dev = partition.replaceAll('p\\d+$', "")
-                // part_id = 2
-                part_id = partition.substring(partition.lastIndexOf("p") + 1).replaceAll("[^0-9]+", "")
+    def partitionID = ''
+    def disk = ''
+    def wipeCmd = ''
+    def lvm_enabled = getPillar(master, target, "ceph:osd:lvm_enabled")
 
-            } else {
-                // partition = /dev/sdi2
-                // dev = /dev/sdi
-                dev = partition.replaceAll('\\d+$', "")
-                // part_id = 2
-                part_id = partition.substring(partition.lastIndexOf("/") + 1).replaceAll("[^0-9]+", "")
-            }
+    if(!partition?.trim()) {
+        throw new Exception("Can't proceed without defined partition.")
+    }
+    cmdRunOnTarget(master, target, "test -b $partition")
+
+    if(fullWipe) { wipeCmd = "dd if=/dev/zero of=$partition bs=1M 2>/dev/null" }
+    else { wipeCmd = "dd if=/dev/zero of=$partition bs=1M count=100 2>/dev/null" }
+
+    common.infoMsg("Removing from the cluster $type partition $partition on $target.")
+    if(type == 'lockbox') {
+        try {
+            partition = cmdRunOnTarget(master, target, "lsblk -rp | grep -v mapper | grep $partition", false)
+            cmdRunOnTarget(master, target, "umount $partition")
+        }
+        catch (Exception e) {
+            common.warningMsg(e)
         }
     }
-    if (lvm_enabled && type != 'lockbox') {
-        salt.cmdRun(master, target, "ceph-volume lvm zap /dev/disk/by-partuuid/${partition_uuid} --destroy")
-    } else if (dev != '') {
-        salt.cmdRun(master, target, "parted ${dev} rm ${part_id}")
-    } else {
-        common.infoMsg("Did not found any device to be wiped.")
+    else if(type == 'data') {
+        cmdRunOnTarget(master, target, "umount $partition 2>/dev/null", false)
+        cmdRunOnTarget(master, target, wipeCmd, false)
     }
-    return
+    else if(type == 'block' || fullWipe) {
+        cmdRunOnTarget(master, target, wipeCmd, false)
+    }
+    try {
+        partitionID = cmdRunOnTarget(master, target, "cat /sys/dev/block/`lsblk $partition -no MAJ:MIN | xargs`/partition", false)
+        disk = cmdRunOnTarget(master, target, "lsblk $partition -no pkname", false)
+    }
+    catch (Exception e) {
+        common.errorMsg("Couldn't get disk name or partition number for $partition")
+        common.warningMsg(e)
+    }
+    try {
+        cmdRunOnTarget(master, target, "sgdisk -d$partitionID /dev/$disk", true, true)
+    }
+    catch (Exception e) {
+        common.warningMsg("Did not found any device to be wiped.")
+        common.warningMsg(e)
+    }
+    // try to remove the partition table if the disk has no partitions left - required by ceph-volume
+    cmdRunOnTarget(master, target, "partprobe -d -s /dev/$disk | grep partitions\$ && sgdisk -Z /dev/$disk", false, true)
 }
diff --git a/src/com/mirantis/mk/Common.groovy b/src/com/mirantis/mk/Common.groovy
index 65a847e..448935f 100644
--- a/src/com/mirantis/mk/Common.groovy
+++ b/src/com/mirantis/mk/Common.groovy
@@ -3,6 +3,8 @@
 import static groovy.json.JsonOutput.prettyPrint
 import static groovy.json.JsonOutput.toJson
 
+import groovy.time.TimeCategory
+
 import com.cloudbees.groovy.cps.NonCPS
 import groovy.json.JsonSlurperClassic
 
@@ -25,6 +27,31 @@
 }
 
 /**
+ * Return Duration for given datetime period with suffix
+ *
+ * @param input String in format '\d+[smhd]', to convert given number into seconds, minutes, hours
+ *              and days duration respectively. For example: '7d' is for 7 days, '10m' - 10 minutes
+ *              and so on. Returns null if the input is in an incorrect format
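+ *
+ * Example (illustrative):
+ *     def timeout = getDuration('90m')   // a 90-minute Duration
+ *     def invalid = getDuration('15x')   // logs an error and returns null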
+ */
+def getDuration(String input) {
+    // Verify input format
+    if (!input.matches('[0-9]+[smhd]')) {
+        errorMsg("Incorrect input data for getDuration(): ${input}")
+        return
+    }
+    switch (input[-1]) {
+        case 's':
+            return TimeCategory.getSeconds(input[0..-2].toInteger())
+        case 'm':
+            return TimeCategory.getMinutes(input[0..-2].toInteger())
+        case 'h':
+            return TimeCategory.getHours(input[0..-2].toInteger())
+        case 'd':
+            return TimeCategory.getDays(input[0..-2].toInteger())
+    }
+}
+
+/**
  * Return workspace.
  * Currently implemented by calling pwd so it won't return relevant result in
  * dir context
@@ -1143,3 +1170,25 @@
         return readYaml(text: kwargs['text'])
     }
 }
+
+/**
+ * withTempDir runs a block of code inside a new temporary directory.
+ * This temp dir will be removed when finished.
+ * @param: closure Closure - code block to be executed in a tmp directory
+ *
+ * Example:
+ *
+ *     withTempDir {
+ *         sh "pwd"
+ *     }
+ *
+ **/
+void withTempDir(Closure closure) {
+    dir(pwd(tmp: true)) {
+        try {
+            closure()
+        } finally {
+            deleteDir()
+        }
+    }
+}
diff --git a/src/com/mirantis/mk/Debian.groovy b/src/com/mirantis/mk/Debian.groovy
index 47cd772..1b39d14 100644
--- a/src/com/mirantis/mk/Debian.groovy
+++ b/src/com/mirantis/mk/Debian.groovy
@@ -268,7 +268,7 @@
     salt.runSaltProcessStep(env, target, 'system.reboot', [], null, true, 5)
 
     common.retry(timeout, attempts) {
-        if (salt.runSaltProcessStep(env, target, 'file.file_exists', ['/tmp/rebooting'], null, true, 5)['return'][0].values()[0].toBoolean()) {
+        if (salt.runSaltProcessStep(env, target, 'cmd.run', ['test -e /tmp/rebooting || echo NOFILE'], null, true, 5)['return'][0].values()[0] != "NOFILE") {
             error("The system is still rebooting...")
         }
     }
@@ -354,4 +354,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/com/mirantis/mk/DockerImageScanner.groovy b/src/com/mirantis/mk/DockerImageScanner.groovy
index bb8e361..aeb162f 100644
--- a/src/com/mirantis/mk/DockerImageScanner.groovy
+++ b/src/com/mirantis/mk/DockerImageScanner.groovy
@@ -3,6 +3,7 @@
 package com.mirantis.mk
 
 import groovy.json.JsonSlurper
+import groovy.json.JsonOutput
 
 def callREST (String uri, String auth,
               String method = 'GET', String message = null) {
@@ -30,7 +31,7 @@
         case ~/^(tungsten|tungsten-operator)\/.*$/:
             team_assignee = 'OpenContrail'
             break
-        case ~/^bm\/.*$/:
+        case ~/^(bm|general)\/.*$/:
             team_assignee = 'BM/OS (KaaS BM)'
             break
         case ~/^openstack\/.*$/:
@@ -42,7 +43,7 @@
         case ~/^ceph\/.*$/:
             team_assignee = 'Storage'
             break
-        case ~/^iam\/.*$/:
+        case ~/^(core|iam)\/.*$/:
             team_assignee = 'KaaS'
             break
         case ~/^lcm\/.*$/:
@@ -91,7 +92,7 @@
     if (!found_key[0] && dict && image_short_name) {
         dict.each { issue_key_name ->
             if (!found_key[0]) {
-                def s = dict[issue_key_name.key]['summary'] =~ /\b${image_short_name}\b/
+                def s = dict[issue_key_name.key]['summary'] =~ /(?<=[\/\[])${image_short_name}(?=\])/
                 if (s) {
                     if (image_full_name) {
                         def d = dict[issue_key_name.key]['description'] =~ /(?m)\b${image_full_name}\b/
@@ -125,7 +126,38 @@
     return found_key
 }
 
-def reportJiraTickets(String reportFileContents, String jiraCredentialsID, String jiraUserID) {
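+/**
+ * Get the latest released affected version name for Jira tickets of the given product
+ *
+ * Queries Jira createmeta for PRODX Bug issues and returns the newest released version
+ * whose name starts with 'MOSK' (productName 'mosk') or 'KaaS' (productName 'kaas');
+ * falls back to defaultJiraAffectedVersion otherwise
+ *
+ * @param cred                          Jira credentials object; its description is used as the Jira base URL
+ * @param productName                   Product name, 'mosk' or 'kaas'
+ * @param defaultJiraAffectedVersion    Fallback version name (optional, default 'Backlog')
+ */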
+def getLatestAffectedVersion(cred, productName, defaultJiraAffectedVersion = 'Backlog') {
+    def filterName = ''
+    if (productName == 'mosk') {
+        filterName = 'MOSK'
+    } else if (productName == 'kaas') {
+        filterName = 'KaaS'
+    } else {
+        return defaultJiraAffectedVersion
+    }
+
+    def search_api_url = "${cred.description}/rest/api/2/issue/createmeta?projectKeys=PRODX&issuetypeNames=Bug&expand=projects.issuetypes.fields"
+    def response = callREST("${search_api_url}", "${cred.username}:${cred.password}", 'GET')
+    def InputJSON = new JsonSlurper().parseText(response["responseText"])
+    def AffectedVersions = InputJSON['projects'][0]['issuetypes'][0]['fields']['versions']['allowedValues']
+
+    def versions = []
+    AffectedVersions.each{
+        if (it.containsKey('released') && it['released']) {
+            if (it.containsKey('name') && it['name'].startsWith(filterName)) {
+                if (it.containsKey('releaseDate') && it['releaseDate']) {
+                    versions.add("${it['releaseDate']}`${it['name']}")
+                }
+            }
+        }
+    }
+    if (versions) {
+        return versions.sort()[-1].split('`')[-1]
+    }
+    return defaultJiraAffectedVersion
+}
+
+def reportJiraTickets(String reportFileContents, String jiraCredentialsID, String jiraUserID, String productName = '', String jiraNamespace = 'PRODX') {
 
     def dict = [:]
 
@@ -138,7 +170,7 @@
 
     def search_json = """
 {
-    "jql": "reporter = ${jiraUserID} and (labels = cve and labels = security) and (status = 'To Do' or status = 'For Triage' or status = Open or status = 'In Progress')"
+        "jql": "reporter = ${jiraUserID} and (labels = cve and labels = security) and (status = 'To Do' or status = 'For Triage' or status = Open or status = 'In Progress' or status = New)", "maxResults":-1
 }
 """
 
@@ -180,53 +212,62 @@
             }
     }
 
+    def affectedVersion = ''
+    if (jiraNamespace == 'PRODX') {
+        affectedVersion = getLatestAffectedVersion(cred, productName)
+    }
+
     def jira_summary = ''
     def jira_description = ''
     imageDict.each{
         image ->
             def image_key = image.key.replaceAll(/(^[a-z0-9-.]+.mirantis.(net|com)\/|:.*$)/, '')
+            // Below change was introduced due to a different workflow for UCP Docker images (RE-274)
+            if (image_key.startsWith('lcm/docker/ucp')) {
+                return
+            } else if (image_key.startsWith('mirantis/ucp') || image_key.startsWith('mirantiseng/ucp')) {
+                jiraNamespace = 'ENGORC'
+            } else {
+                jiraNamespace = 'PRODX'
+            }
             jira_summary = "[${image_key}] Found CVEs in Docker image"
-            jira_description = "${image.key}\\n"
+            jira_description = "${image.key}\n"
             image.value.each{
                 pkg ->
-                    jira_description += "__* ${pkg.key}\\n"
+                    jira_description += "__* ${pkg.key}\n"
                     pkg.value.each{
                         cve ->
-                            jira_description += "________${cve}\\n"
+                            jira_description += "________${cve}\n"
                     }
             }
 
             def team_assignee = getTeam(image_key)
 
-            def post_issue_json = """
-{
-    "fields": {
-        "project": {
-            "key": "PRODX"
-        },
-        "summary": "${jira_summary}",
-        "description": "${jira_description}",
-        "issuetype": {
-            "name": "Bug"
-        },
-        "labels": [
-            "security",
-            "cve"
-        ],
-        "customfield_19000": {
-            "value": "${team_assignee}"
-        },
-        "versions": [
-            {
-                "name": "Backlog"
+            def basicIssueJSON = new JsonSlurper().parseText('{"fields": {}}')
+
+            basicIssueJSON['fields'] = [
+                project:[
+                    key:"${jiraNamespace}"
+                ],
+                summary:"${jira_summary}",
+                description:"${jira_description}",
+                issuetype:[
+                    name:'Bug'
+                ],
+                labels:[
+                    'security',
+                    'cve'
+                ]
+            ]
+            if (jiraNamespace == 'PRODX') {
+                basicIssueJSON['fields']['customfield_19000'] = [value:"${team_assignee}"]
+                basicIssueJSON['fields']['versions'] = [["name": affectedVersion]]
             }
-        ]
-    }
-}
-"""
+            def post_issue_json = JsonOutput.toJson(basicIssueJSON)
+            def jira_comment = jira_description.replaceAll(/\n/, '\\\\n')
             def post_comment_json = """
 {
-    "body": "${jira_description}"
+    "body": "${jira_comment}"
 }
 """
             def jira_key = cacheLookUp(dict, image_key, image.key)
@@ -234,7 +275,7 @@
                 def post_comment_response = callREST("${uri}/${jira_key[0]}/comment", auth, 'POST', post_comment_json)
                 if ( post_comment_response['responseCode'] == 201 ) {
                     def issueCommentJSON = new JsonSlurper().parseText(post_comment_response["responseText"])
-                    print "\n\nComment was posted to ${jira_key[0]} for ${image_key} and ${image.key}"
+                    print "\n\nComment was posted to ${jira_key[0]} ${affectedVersion} for ${image_key} and ${image.key}"
                 } else {
                     print "\nComment to ${jira_key[0]} Jira issue was not posted"
                 }
@@ -243,7 +284,7 @@
                 if (post_issue_response['responseCode'] == 201) {
                     def issueJSON = new JsonSlurper().parseText(post_issue_response["responseText"])
                     dict = updateDictionary(issueJSON['key'], dict, uri, auth, jiraUserID)
-                    print "\n\nJira issue was created ${issueJSON['key']} for ${image_key} and ${image.key}"
+                    print "\n\nJira issue was created ${issueJSON['key']} ${affectedVersion} for ${image_key} and ${image.key}"
                 } else {
                     print "\n${image.key} CVE issues were not published\n"
                 }
diff --git a/src/com/mirantis/mk/Gerrit.groovy b/src/com/mirantis/mk/Gerrit.groovy
index 88da957..07c6b5d 100644
--- a/src/com/mirantis/mk/Gerrit.groovy
+++ b/src/com/mirantis/mk/Gerrit.groovy
@@ -320,11 +320,13 @@
  *                          HOST, PORT and USER
  * @param changeParams      Parameters to identify Geriit change e.g.: owner, topic,
  *                          status, branch, project
+ * @param extraFlags        Additional flags for the gerrit query, for example
+ *                          '--current-patch-set' or '--comments', as a simple string
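+ *
+ * Example (illustrative; credentials id, host and project are assumptions):
+ *     findGerritChange('gerrit-ci-creds',
+ *                      [USER: 'jenkins', HOST: 'gerrit.example.net', PORT: '29418'],
+ *                      [project: 'kaas/core', status: 'open', branch: 'master'],
+ *                      '--current-patch-set')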
  */
-def findGerritChange(credentialsId, LinkedHashMap gerritAuth, LinkedHashMap changeParams) {
+def findGerritChange(credentialsId, LinkedHashMap gerritAuth, LinkedHashMap changeParams, String extraFlags = '') {
     scriptText = """
                  ssh -p ${gerritAuth['PORT']} ${gerritAuth['USER']}@${gerritAuth['HOST']} \
-                 gerrit query \
+                 gerrit query ${extraFlags} \
                  --format JSON \
                  """
     changeParams.each {
diff --git a/src/com/mirantis/mk/Git.groovy b/src/com/mirantis/mk/Git.groovy
index d724b8f..5108cda 100644
--- a/src/com/mirantis/mk/Git.groovy
+++ b/src/com/mirantis/mk/Git.groovy
@@ -529,7 +529,7 @@
 
     def changeParams = ['owner': auth['USER'], 'status': status, 'project': project, 'branch': branch, 'topic': topic]
     def gerritChange = gerrit.findGerritChange(creds, auth, changeParams)
-    def changeId
+    def changeId = params.get('changeId', '')
     def commit
     if (gerritChange) {
         def jsonChange = readJSON text: gerritChange
diff --git a/src/com/mirantis/mk/Http.groovy b/src/com/mirantis/mk/Http.groovy
index b752b42..a7e0f97 100644
--- a/src/com/mirantis/mk/Http.groovy
+++ b/src/com/mirantis/mk/Http.groovy
@@ -55,7 +55,10 @@
         response = connection.inputStream.text
         try {
             response_content = new groovy.json.JsonSlurperClassic().parseText(response)
-        } catch (groovy.json.JsonException e) {
+        } catch (groovy.json.JsonException|java.lang.NullPointerException e) {
+            if(env.getEnvironment().containsKey('DEBUG') && env['DEBUG'] == "true"){
+                println("[HTTP] Cought exception while trying parsing response as JSON: ${e}")
+            }
             response_content = response
         }
         if(env.getEnvironment().containsKey('DEBUG') && env['DEBUG'] == "true"){
diff --git a/src/com/mirantis/mk/JenkinsUtils.groovy b/src/com/mirantis/mk/JenkinsUtils.groovy
index 780ccab..a9c5d0c 100644
--- a/src/com/mirantis/mk/JenkinsUtils.groovy
+++ b/src/com/mirantis/mk/JenkinsUtils.groovy
@@ -235,27 +235,33 @@
 /**
  * Check dependency jobs passed successfully
 
- * @param block   (bool) Block child jobs in case of parent dependencies failed
- * @return        (map)[
- *                    status: (bool) True if there are no failed dependencies
- *                    log: (string) Verbose description
- *                   ]
+ * @param block           (bool) Block child jobs in case of parent dependencies failed
+ * @param allowNotBuilt   (bool) Treat NOT_BUILT status of a dependency job as acceptable
+ * @return                (map)[
+ *                            status: (bool) True if there are no failed dependencies
+ *                            log: (string) Verbose description
+ *                           ]
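+ *
+ * Example (illustrative):
+ *     def deps = checkDependencyJobs(true, true)
+ *     if (!deps['status']) { error(deps['log']) }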
  */
-def checkDependencyJobs(block = true) {
+def checkDependencyJobs(block = true, allowNotBuilt = false) {
     def common = new com.mirantis.mk.Common()
+    def acceptedStatuses = ['SUCCESS']
+    if (allowNotBuilt) {
+        acceptedStatuses.add('NOT_BUILT')
+    }
+
     depList = []
     if (env.TRIGGER_DEPENDENCY_KEYS){
         common.infoMsg('Job may depends on parent jobs, check if dependency jobs exist...')
         depKeys = env.TRIGGER_DEPENDENCY_KEYS.toString()
         depList = depKeys.split()
         if (depList){
-            common.infoMsg('Here is dependency jobs-list: ' + depList)
+            common.infoMsg("Here is dependency jobs-list: ${depList} , accepted job statuses are: ${acceptedStatuses}")
             for (String item : depList) {
                 prjName = item.replaceAll('[^a-zA-Z0-9]+', '_')
                 triggerResult = 'TRIGGER_' + prjName.toUpperCase() + '_BUILD_RESULT'
                 triggerJobName = 'TRIGGER_' + prjName.toUpperCase() + '_BUILD_NAME'
                 triggerJobBuild = 'TRIGGER_' + prjName.toUpperCase() + '_BUILD_NUMBER'
-                if (env.getProperty(triggerResult) != 'SUCCESS'){
+                if (!acceptedStatuses.contains(env.getProperty(triggerResult))) {
                     msg = "Dependency job ${env.getProperty(triggerJobName)} #${env.getProperty(triggerJobBuild)} is ${env.getProperty(triggerResult)}"
                     common.warningMsg(msg)
                     if (block){
@@ -271,3 +277,30 @@
     }
     return [status: true, log: '', jobs: depList]
 }
+
+/**
+ *  Return jenkins infra metadata according to the specified jenkins instance
+
+ * @param jenkinsServerURL  (string) URL to jenkins server in form: env.JENKINS_URL
+ * @return                  (map)[
+ *                              jenkins_service_user: (string) name of jenkins user needed for gerrit ops
+ *                             ]
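+ *
+ * Example (illustrative):
+ *     def meta = getJenkinsInfraMetadata(env.JENKINS_URL)
+ *     def serviceUser = meta['jenkins_service_user']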
+ */
+def getJenkinsInfraMetadata(jenkinsServerURL) {
+    def meta = [
+        jenkins_service_user: '',
+    ]
+
+    switch (jenkinsServerURL) {
+        case 'https://ci.mcp.mirantis.net/':
+            meta['jenkins_service_user'] = 'mcp-jenkins'
+            break
+        case 'https://mcc-ci.infra.mirantis.net/':
+            meta['jenkins_service_user'] = 'mcc-ci-jenkins'
+            break
+        default:
+            error("Failed to detect jenkins service user, supported jenkins platforms: 'https://ci.mcp.mirantis.net/' 'https://mcc-ci.infra.mirantis.net/'")
+    }
+
+    return meta
+}
diff --git a/src/com/mirantis/mk/KaasUtils.groovy b/src/com/mirantis/mk/KaasUtils.groovy
index 6f0ed77..5936ec8 100644
--- a/src/com/mirantis/mk/KaasUtils.groovy
+++ b/src/com/mirantis/mk/KaasUtils.groovy
@@ -6,6 +6,29 @@
  *
  */
 
+/**
+ * Check KaaS Core CICD feature flags
+ * such flags can be used when switching between pipelines or in
+ * conditions inside pipelines, to reduce dependency on jenkins job builder and the jenkins job templates themselves
+ *
+ * @return      (map)[
+ *                    ffNameEnabled: (bool) True/False
+ *                   ]
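+ *
+ * Example (illustrative):
+ *     def ff = checkCoreCIFeatureFlags()
+ *     if (ff['build_artifacts_upgrade']) {
+ *         // switch job logic to the artifacts upgrade flow
+ *     }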
+ */
+def checkCoreCIFeatureFlags() {
+    def common = new com.mirantis.mk.Common()
+    def ff = [
+        build_artifacts_upgrade: false,
+    ]
+
+    def commitMsg = env.GERRIT_CHANGE_COMMIT_MESSAGE ? new String(env.GERRIT_CHANGE_COMMIT_MESSAGE.decodeBase64()) : ''
+    if (commitMsg ==~ /(?s).*\[ci-build-artifacts-upgrade\].*/) {
+        ff['build_artifacts_upgrade'] = true
+    }
+
+    common.infoMsg("Core ci feature flags status: ${ff}")
+    return ff
+}
 
 /**
  * Determine scope of test suite against per-commit KaaS deployment based on keywords
@@ -13,7 +36,7 @@
  *
  * Used for components team to combine test-suites and forward desired parameters to kaas/core deployment jobs
  * Example scheme:
- * New CR pushed in kubernetes/lcm-ansible -> parsing it's commit body and combine test-suite -> trigger deployment jobs from kaas/core
+ * New CR pushed in kubernetes/lcm-ansible -> parsing its commit body and combine test-suite -> trigger deployment jobs from kaas/core
  * manage test-suite through Jenkins Job Parameters
  *
  * @return      (map)[
@@ -25,16 +48,35 @@
     def common = new com.mirantis.mk.Common()
 
     // Available triggers and its sane defaults
+    def seedMacOs = env.SEED_MACOS ? env.SEED_MACOS.toBoolean() : false
     def deployChild = env.DEPLOY_CHILD_CLUSTER ? env.DEPLOY_CHILD_CLUSTER.toBoolean() : false
     def upgradeChild = env.UPGRADE_CHILD_CLUSTER ? env.UPGRADE_CHILD_CLUSTER.toBoolean() : false
+    def attachBYO = env.ATTACH_BYO ? env.ATTACH_BYO.toBoolean() : false
+    def upgradeBYO = env.UPGRADE_BYO ? env.UPGRADE_BYO.toBoolean() : false
     def upgradeMgmt = env.UPGRADE_MGMT_CLUSTER ? env.UPGRADE_MGMT_CLUSTER.toBoolean() : false
     def runUie2e = env.RUN_UI_E2E ? env.RUN_UI_E2E.toBoolean() : false
     def runMgmtConformance = env.RUN_MGMT_CFM ? env.RUN_MGMT_CFM.toBoolean() : false
     def runChildConformance = env.RUN_CHILD_CFM ? env.RUN_CHILD_CFM.toBoolean() : false
     def fetchServiceBinaries = env.FETCH_BINARIES_FROM_UPSTREAM ? env.FETCH_BINARIES_FROM_UPSTREAM.toBoolean() : false
+    // multiregion configuration from env variable: comma-separated string in form $mgmt_provider,$regional_provider
+    def multiregionalMappings = env.MULTIREGION_SETUP ? multiregionWorkflowParser(env.MULTIREGION_SETUP) : [
+        enabled: false,
+        managementLocation: '',
+        regionLocation: '',
+    ]
+
+    // optional demo deployment customization
     def awsOnDemandDemo = env.ALLOW_AWS_ON_DEMAND ? env.ALLOW_AWS_ON_DEMAND.toBoolean() : false
+    def awsOnRhelDemo = false
+    def vsphereOnDemandDemo = env.ALLOW_VSPHERE_ON_DEMAND ? env.ALLOW_VSPHERE_ON_DEMAND.toBoolean() : false
+    def equinixOnAwsDemo = false
+    def enableOSDemo = true
+    def enableBMDemo = true
 
     def commitMsg = env.GERRIT_CHANGE_COMMIT_MESSAGE ? new String(env.GERRIT_CHANGE_COMMIT_MESSAGE.decodeBase64()) : ''
+    if (commitMsg ==~ /(?s).*\[seed-macos\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*seed-macos.*/) {
+        seedMacOs = true
+    }
     if (commitMsg ==~ /(?s).*\[child-deploy\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*child-deploy.*/ || upgradeChild || runChildConformance) {
         deployChild = true
     }
@@ -42,8 +84,19 @@
         deployChild = true
         upgradeChild = true
     }
-    if (commitMsg ==~ /(?s).*\[mgmt-upgrade\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*mgmt-upgrade.*/) {
+    if (commitMsg ==~ /(?s).*\[byo-attach\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*byo-attach.*/) {
+        attachBYO = true
+    }
+    if (commitMsg ==~ /(?s).*\[byo-upgrade\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*byo-upgrade.*/) {
+        attachBYO = true
+        upgradeBYO = true
+    }
+    if (commitMsg ==~ /(?s).*\[mgmt-upgrade\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*mgmt-upgrade.*/ || upgradeBYO) {
         upgradeMgmt = true
+        if (upgradeBYO) {
+            // TODO (vnaumov) remove such dependency right after we can verify byo upgrade w/o mgmt upgrade
+            common.warningMsg('Forced running kaas mgmt upgrade scenario, due to the byo demo scenario trigger: \'[byo-upgrade]\' ')
+        }
     }
     if (commitMsg ==~ /(?s).*\[ui-e2e\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*ui-e2e.*/) {
         runUie2e = true
@@ -58,9 +111,34 @@
     if (commitMsg ==~ /(?s).*\[fetch.*binaries\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*fetch.*binaries.*/) {
         fetchServiceBinaries = true
     }
-    if (commitMsg ==~ /(?s).*\[aws-demo\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*aws-demo.*/) {
+    if (commitMsg ==~ /(?s).*\[equinix-demo\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*equinix-demo.*/) {
+        equinixOnAwsDemo = true
+        common.warningMsg('Forced running child cluster deployment on EQUINIX METAL provider based on AWS management cluster, triggered on patchset using custom keyword: \'[equinix-demo]\' ')
+    }
+    if (commitMsg ==~ /(?s).*\[aws-demo\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*aws-demo.*/ || attachBYO || upgradeBYO || seedMacOs || equinixOnAwsDemo) {
         awsOnDemandDemo = true
-        common.warningMsg('Forced running additional kaas deployment with AWS provider, triggered on patchset using custom keyword: \'aws-demo\' ')
+        if (attachBYO || upgradeBYO || seedMacOs || equinixOnAwsDemo) {
+            common.warningMsg('Forced running additional kaas deployment with AWS provider, due to applied trigger cross-dependencies; follow the docs for details')
+        }
+    }
+    if (commitMsg ==~ /(?s).*\[aws-rhel-demo\].*/) {
+        awsOnDemandDemo = false
+        awsOnRhelDemo = true
+        common.warningMsg('Forced running additional kaas deployment with AWS provider on RHEL, triggered on patchset using custom keyword: \'[aws-rhel-demo]\'. ' +
+                'Upgrade scenario for Mgmt or Child cluster is not supported currently in such deployment')
+    }
+    if (commitMsg ==~ /(?s).*\[vsphere-demo\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*vsphere-demo.*/) {
+        vsphereOnDemandDemo = true
+        common.warningMsg('Forced running additional kaas deployment with VSPHERE provider, triggered on patchset using custom keyword: \'[vsphere-demo]\' ')
+    }
+    if (commitMsg ==~ /(?s).*\[disable-os-demo\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*disable-os-demo.*/) {
+        enableOSDemo = false
+        common.errorMsg('Openstack demo deployment will be aborted, VF -1 will be set')
+    }
+
+    if (commitMsg ==~ /(?s).*\[disable-bm-demo\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*disable-bm-demo.*/) {
+        enableBMDemo = false
+        common.errorMsg('BM demo deployment will be aborted, VF -1 will be set')
     }
 
     // TODO (vnaumov) remove below condition after moving all releases to UCP
@@ -74,25 +152,118 @@
         upgradeChild = false
     }
 
+    // multiregional tests
+    def multiRegionalMatches = (commitMsg =~ /(\[multiregion\s*.*?\])/)
+    if (multiRegionalMatches.size() > 0) {
+        multiregionalMappings = multiregionWorkflowParser(multiRegionalMatches)
+    }
+    switch (multiregionalMappings['managementLocation']) {
+        case 'aws':
+            common.warningMsg('Forced running additional kaas deployment with AWS provider according to the multiregional demo request')
+            awsOnDemandDemo = true
+            if (awsOnRhelDemo) {
+                // Run only one variant: standard AWS deployment (on Ubuntu) or on RHEL
+                awsOnDemandDemo = false
+            }
+
+            if (multiregionalMappings['regionLocation'] != 'aws' && seedMacOs) { // macstadium seed node has access only to *public* providers
+                error('incompatible triggers: [seed-macos] and multiregional deployment based on *private* regional provider cannot be applied simultaneously')
+            }
+            break
+        case 'os':
+            if (enableOSDemo == false) {
+                error('incompatible triggers: [disable-os-demo] and multiregional deployment based on OSt management region cannot be applied simultaneously')
+            }
+            break
+    }
+
+    // calculate weight of current demo run to manage lockable resources
+    def demoWeight = (deployChild) ? 2 : 1 // management cluster = 1, deploying a child cluster adds 1
+
     common.infoMsg("""
+        Use MacOS node as seed: ${seedMacOs}
         Child cluster deployment scheduled: ${deployChild}
         Child cluster release upgrade scheduled: ${upgradeChild}
         Child conformance testing scheduled: ${runChildConformance}
+        BYO cluster attachment scheduled: ${attachBYO}
+        Attached BYO cluster upgrade test scheduled: ${upgradeBYO}
         Mgmt cluster release upgrade scheduled: ${upgradeMgmt}
         Mgmt conformance testing scheduled: ${runMgmtConformance}
         Mgmt UI e2e testing scheduled: ${runUie2e}
-        AWS provider additional deployment scheduled: ${awsOnDemandDemo}
+        AWS provider deployment scheduled: ${awsOnDemandDemo}
+        AWS provider on RHEL deployment scheduled: ${awsOnRhelDemo}
+        VSPHERE provider deployment scheduled: ${vsphereOnDemandDemo}
+        EQUINIX child cluster deployment scheduled: ${equinixOnAwsDemo}
+        OS provider deployment scheduled: ${enableOSDemo}
+        BM provider deployment scheduled: ${enableBMDemo}
+        Multiregional configuration: ${multiregionalMappings}
         Service binaries fetching scheduled: ${fetchServiceBinaries}
+        Current weight of the demo run: ${demoWeight} (Used to manage lockable resources)
         Triggers: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/""")
     return [
+        useMacOsSeedNode           : seedMacOs,
         deployChildEnabled         : deployChild,
         upgradeChildEnabled        : upgradeChild,
         runChildConformanceEnabled : runChildConformance,
+        attachBYOEnabled           : attachBYO,
+        upgradeBYOEnabled          : upgradeBYO,
         upgradeMgmtEnabled         : upgradeMgmt,
         runUie2eEnabled            : runUie2e,
         runMgmtConformanceEnabled  : runMgmtConformance,
         fetchServiceBinariesEnabled: fetchServiceBinaries,
-        awsOnDemandDemoEnabled     : awsOnDemandDemo]
+        awsOnDemandDemoEnabled     : awsOnDemandDemo,
+        awsOnDemandRhelDemoEnabled : awsOnRhelDemo,
+        vsphereOnDemandDemoEnabled : vsphereOnDemandDemo,
+        equinixOnAwsDemoEnabled    : equinixOnAwsDemo,
+        bmDemoEnabled              : enableBMDemo,
+        osDemoEnabled              : enableOSDemo,
+        multiregionalConfiguration : multiregionalMappings,
+        demoWeight                 : demoWeight]
+}
+
+/**
+ * Determine management and regional setup for the demo workflow scenario
+ *
+ * @param:        keyword (string) keyword trigger specified in the gerrit commit body, like `[multiregion aws,os]`,
+ *                                 or a Jenkins environment string variable in the form 'aws,os'
+ * @return        (map)[
+ *                        enabled: (bool),
+ *                        managementLocation: (string), // aws,os
+ *                        regionLocation: (string), // aws,os
+ *                     ]
+ */
+def multiregionWorkflowParser(keyword) {
+    def common = new com.mirantis.mk.Common()
+    def supportedManagementProviders = ['os', 'aws']
+    def supportedRegionalProviders = ['os']
+
+    def clusterTypes = ''
+    if (keyword.toString().contains('multiregion')) {
+        common.infoMsg('Multiregion definition configured via gerrit keyword trigger')
+        clusterTypes = keyword[0][0].split('multiregion')[1].replaceAll('[\\[\\]]', '').trim().split(',')
+    } else {
+        common.infoMsg('Multiregion definition configured via environment variable')
+        clusterTypes = keyword.trim().split(',')
+    }
+
+    if (clusterTypes.size() != 2) {
+        error("Incorrect regions definition, valid scheme: [multiregion <management>,<region>], got: ${clusterTypes}")
+    }
+
+    def desiredManagementProvider = clusterTypes[0].trim()
+    def desiredRegionalProvider = clusterTypes[1].trim()
+    if (! supportedManagementProviders.contains(desiredManagementProvider) || ! supportedRegionalProviders.contains(desiredRegionalProvider)) {
+        error("""unsupported management <-> regional bundle, available options:
+              management providers list - ${supportedManagementProviders}
+              regional providers list - ${supportedRegionalProviders}""")
+    }
+
+    return [
+        enabled: true,
+        managementLocation: desiredManagementProvider,
+        regionLocation: desiredRegionalProvider,
+    ]
 }
 
 /**
@@ -114,7 +285,8 @@
     // Available triggers and its sane defaults
     def siTestsRefspec = env.SI_TESTS_REFSPEC ?: 'master'
     def siPipelinesRefspec = env.SI_PIPELINES_REFSPEC ?: 'master'
-    def siTestsDockerImage = env.SI_TESTS_DOCKER_IMAGE ?: 'docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master'
+    def siTestsDockerImage = env.SI_TESTS_DOCKER_IMAGE ?: 'docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test'
+    def siTestsDockerImageTag = env.SI_TESTS_DOCKER_IMAGE_TAG ?: 'master'
     def commitMsg = env.GERRIT_CHANGE_COMMIT_MESSAGE ? new String(env.GERRIT_CHANGE_COMMIT_MESSAGE.decodeBase64()) : ''
 
     def siTestMatches = (commitMsg =~ /(\[si-tests-ref\s*refs\/changes\/.*?\])/)
@@ -122,8 +294,8 @@
 
     if (siTestMatches.size() > 0) {
         siTestsRefspec = siTestMatches[0][0].split('si-tests-ref')[1].replaceAll('[\\[\\]]', '').trim()
-        siTestsDockerImage = "docker-dev-local.docker.mirantis.net/review/" +
-            "kaas-si-test-${siTestsRefspec.split('/')[-2]}:${siTestsRefspec.split('/')[-1]}"
+        siTestsDockerImage = "docker-dev-local.docker.mirantis.net/review/kaas-si-test-${siTestsRefspec.split('/')[-2]}"
+        siTestsDockerImageTag = siTestsRefspec.split('/')[-1]
     }
     if (siPipelinesMatches.size() > 0) {
         siPipelinesRefspec = siPipelinesMatches[0][0].split('si-pipelines-ref')[1].replaceAll('[\\[\\]]', '').trim()
@@ -132,9 +304,9 @@
     common.infoMsg("""
         kaas/si-pipelines will be fetched from: ${siPipelinesRefspec}
         kaas/si-tests will be fetched from: ${siTestsRefspec}
-        kaas/si-tests as dockerImage will be fetched from: ${siTestsDockerImage}
+        kaas/si-tests as dockerImage will be fetched from: ${siTestsDockerImage}:${siTestsDockerImageTag}
         Keywords: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/""")
-    return [siTests: siTestsRefspec, siPipelines: siPipelinesRefspec, siTestsDockerImage: siTestsDockerImage]
+    return [siTests: siTestsRefspec, siPipelines: siPipelinesRefspec, siTestsDockerImage: siTestsDockerImage, siTestsDockerImageTag: siTestsDockerImageTag]
 }
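As a sketch of the refspec-to-image/tag mapping introduced above (the refspec value itself is hypothetical, the split logic is the one used in the hunk):

    // hypothetical trigger in a commit message: '[si-tests-ref refs/changes/10/12345/3]'
    def siTestsRefspec = 'refs/changes/10/12345/3'
    def siTestsDockerImage = "docker-dev-local.docker.mirantis.net/review/kaas-si-test-${siTestsRefspec.split('/')[-2]}"
    def siTestsDockerImageTag = siTestsRefspec.split('/')[-1]
    // image: docker-dev-local.docker.mirantis.net/review/kaas-si-test-12345, tag: 3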
 
 /**
@@ -208,6 +380,27 @@
 
 
 /**
+ * Generate Jenkins parameter objects from a text parameter carrying additional kaas core context
+ * that needs to be forwarded into the kaas core set of jobs
+ *
+ * @param           context (string) Representation of the string environment variables needed for kaas core jobs in yaml format
+ * @return          (list)[    string(name: '', value: ''),
+ *                       ]
+ */
+def generateKaaSVarsFromContext(context) {
+    def common = new com.mirantis.mk.Common()
+    def parameters = []
+    def config = readYaml text: context
+
+    config.each { k,v ->
+        common.infoMsg("Custom KaaS Core context parameter: ${k}=${v}")
+        parameters.add(string(name: k, value: v))
+    }
+
+    return parameters
+}
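A small sketch of feeding the helper above; the variable name and YAML keys/values are hypothetical:

    // hypothetical kaas core context coming from a component repository
    def coreContext = 'CUSTOM_DEMO_FLAG: "true"\nEXTRA_TEST_FILTER: "smoke"'
    def extraParameters = generateKaaSVarsFromContext(coreContext)
    // extraParameters now holds string(name: 'CUSTOM_DEMO_FLAG', value: 'true') and
    // string(name: 'EXTRA_TEST_FILTER', value: 'smoke'), ready to be appended to the job parameters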
+
+/**
  * Trigger KaaS demo jobs based on AWS/OS providers with customized test suite, parsed from external sources (gerrit commit/jj vars)
  * Keyword list: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/
+ * Used by component teams to test component changes w/ customized SI tests/refspecs using kaas/core deployment jobs
@@ -216,7 +409,7 @@
  * @param:        patchSpec (string) Patch for kaas/cluster releases in json format
  * @param:        configurationFile (string) Additional file for component repo CI config in yaml format
  */
-def triggerPatchedComponentDemo(component, patchSpec, configurationFile = '.ci-parameters.yaml') {
+def triggerPatchedComponentDemo(component, patchSpec = '', configurationFile = '.ci-parameters.yaml', coreContext = '') {
     def common = new com.mirantis.mk.Common()
     // Determine if custom trigger keywords forwarded from gerrit
     def triggers = checkDeploymentTestSuite()
@@ -235,28 +428,46 @@
     } else {
         common.warningMsg('''Component CI configuration file is not exists,
             several code-management features may be unavailable,
-            follow https://mirantis.jira.com/wiki/spaces/QA/pages/2310832276/SI-tests+feature+flags#%5BUpdated%5D-Component-CI
-            to create configuration file''')
+            follow https://mirantis.jira.com/wiki/spaces/QA/pages/2310832276/SI-tests+feature+flags#%5BUpdated%5D-Using-a-feature-flag
+            to create the configuration file''')
     }
-
-
     def jobs = [:]
     def parameters = [
         string(name: 'GERRIT_REFSPEC', value: coreRefspec.core),
         string(name: 'KAAS_PIPELINE_REFSPEC', value: coreRefspec.corePipelines),
         string(name: 'SI_TESTS_REFSPEC', value: siRefspec.siTests),
         string(name: 'SI_TESTS_FEATURE_FLAGS', value: componentFeatureFlags),
+        string(name: 'SI_TESTS_DOCKER_IMAGE', value: siRefspec.siTestsDockerImage),
+        string(name: 'SI_TESTS_DOCKER_IMAGE_TAG', value: siRefspec.siTestsDockerImageTag),
         string(name: 'SI_PIPELINES_REFSPEC', value: siRefspec.siPipelines),
         string(name: 'CUSTOM_RELEASE_PATCH_SPEC', value: patchSpec),
+        booleanParam(name: 'SEED_MACOS', value: triggers.useMacOsSeedNode),
         booleanParam(name: 'UPGRADE_MGMT_CLUSTER', value: triggers.upgradeMgmtEnabled),
         booleanParam(name: 'RUN_UI_E2E', value: triggers.runUie2eEnabled),
         booleanParam(name: 'RUN_MGMT_CFM', value: triggers.runMgmtConformanceEnabled),
         booleanParam(name: 'DEPLOY_CHILD_CLUSTER', value: triggers.deployChildEnabled),
         booleanParam(name: 'UPGRADE_CHILD_CLUSTER', value: triggers.upgradeChildEnabled),
+        booleanParam(name: 'ATTACH_BYO', value: triggers.attachBYOEnabled),
+        booleanParam(name: 'UPGRADE_BYO', value: triggers.upgradeBYOEnabled),
         booleanParam(name: 'RUN_CHILD_CFM', value: triggers.runChildConformanceEnabled),
-        booleanParam(name: 'ALLOW_AWS_ON_DEMAND', value: triggers.awsOnDemandDemoEnabled),
+        booleanParam(name: 'ALLOW_AWS_ON_DEMAND', value: triggers.awsOnDemandDemoEnabled || triggers.awsOnDemandRhelDemoEnabled),
+        booleanParam(name: 'ALLOW_VSPHERE_ON_DEMAND', value: triggers.vsphereOnDemandDemoEnabled),
     ]
 
+    // customize multiregional demo
+    if (triggers.multiregionalConfiguration.enabled) {
+        parameters.add(string(name: 'MULTIREGION_SETUP',
+                              value: "${triggers.multiregionalConfiguration.managementLocation},${triggers.multiregionalConfiguration.regionLocation}"
+                              ))
+    }
+
+    // Determine component team custom context
+    if (coreContext != '') {
+        common.infoMsg('Additional KaaS Core context detected, will be forwarded into kaas core cicd...')
+        def additionalParameters = generateKaaSVarsFromContext(coreContext)
+        parameters.addAll(additionalParameters)
+    }
+
     def jobResults = []
     jobs["kaas-core-openstack-patched-${component}"] = {
         try {
@@ -300,6 +511,23 @@
             }
         }
     }
+    if (triggers.vsphereOnDemandDemoEnabled) {
+        jobs["kaas-core-vsphere-patched-${component}"] = {
+            try {
+                common.infoMsg('Deploy: patched KaaS demo with VSPHERE provider')
+                vsphere_job_info = build job: "kaas-testing-core-vsphere-workflow-${component}", parameters: parameters, wait: true
+                def build_description = vsphere_job_info.getDescription()
+                def build_result = vsphere_job_info.getResult()
+                jobResults.add(build_result)
+
+                if (build_description) {
+                    currentBuild.description += build_description
+                }
+            } finally {
+                common.infoMsg('Patched KaaS demo with VSPHERE provider finished')
+            }
+        }
+    }
 
     common.infoMsg('Trigger KaaS demo deployments according to defined provider set')
     // Limit build concurency workaround examples: https://issues.jenkins-ci.org/browse/JENKINS-44085
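A hedged usage sketch of the extended signature above; the component name and core context are hypothetical, and patchSpec is left at its (new) default:

    // hypothetical call from a component CI pipeline
    def coreContext = 'CUSTOM_DEMO_FLAG: "true"'
    triggerPatchedComponentDemo('my-component', '', '.ci-parameters.yaml', coreContext)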
diff --git a/src/com/mirantis/mk/Lock.groovy b/src/com/mirantis/mk/Lock.groovy
new file mode 100644
index 0000000..d72ceed
--- /dev/null
+++ b/src/com/mirantis/mk/Lock.groovy
@@ -0,0 +1,155 @@
+import org.jfrog.hudson.pipeline.common.types.ArtifactoryServer
+import java.util.concurrent.TimeoutException
+
+class Lock {
+    String  name, id, path
+    Integer retryInterval, timeout, expiration
+    Boolean force
+    Map lockExtraMetadata
+    ArtifactoryServer artifactoryServer
+
+    private String lockFileContent
+    private String lockFileContentCache
+    private Boolean fileNotFound
+
+    final private String fileUri
+
+    final private def common = new com.mirantis.mk.Common()
+    final private def artifactoryTools = new com.mirantis.mk.Artifactory()
+
+    // Constructor
+    public Lock(Map args) {
+        // Mandatory
+        this.name = args.name
+        this.artifactoryServer = args.artifactoryServer
+
+        // Defaults
+        this.id = args.get('id', '')
+        this.path = args.get('path', 'binary-dev-local/locks')
+        this.retryInterval = args.get('retryInterval', 5*60)
+        this.timeout = args.get('timeout', 3*60*60)
+        this.expiration = args.get('expiration', 24*60*60)
+        this.force = args.get('force', false)
+        this.lockExtraMetadata = args.get('lockExtraMetadata', [:])
+
+        // Internal
+        this.fileUri = "/${path}/${name}.yaml".toLowerCase()
+    }
+
+    final private Map artObj
+    // getPasswordCredentials() is a CPS-transformed function and cannot be used in the constructor
+    final private Map getArtObj() {
+        def artifactoryCreds = common.getPasswordCredentials(artifactoryServer.getCredentialsId())
+        return [
+            'url': "${artifactoryServer.getUrl()}/artifactory",
+            'creds': [
+                'username': artifactoryCreds['username'],
+                'password': artifactoryCreds['password'],
+            ]
+        ]
+    }
+
+    // getter for lockFileContent
+    final private String getLockFileContent() {
+        if (this.lockFileContentCache == null) {
+            try {
+                this.lockFileContentCache = artifactoryTools.restCall(this.artObj, this.fileUri, 'GET', null, [:], '')
+                this.fileNotFound = false // file found
+            } catch (java.io.FileNotFoundException e) {
+                this.lockFileContentCache = ''
+                this.fileNotFound = true // file not found
+            } catch (Exception e) {
+                common.errorMsg(e.message)
+                this.lockFileContentCache = ''
+                this.fileNotFound = null // we don't know about file existence
+            }
+        }
+        return this.lockFileContentCache
+    }
+
+    public void lock() {
+        if (this.force) {
+            common.infoMsg("Ignoring lock check due to 'force' flag presence")
+        } else {
+            waitLockReleased()
+        }
+        createLockFile()
+    }
+
+    public void unlock() {
+        if (!isLockFileExist()) {
+            common.infoMsg("Lock file '${this.artObj['url']}${this.fileUri}' does not exist. No need to remove it")
+            // No need to continue if file does not exist
+            return
+        }
+
+        Map lockMeta = common.readYaml2(text: this.lockFileContent ?: '{}')
+        if (this.force || (this.id && this.id == lockMeta.get('lockID', ''))) {
+            artifactoryTools.restCall(this.artObj, this.fileUri, 'DELETE', null, [:], '')
+            common.infoMsg("Lock file '${this.artObj['url']}${this.fileUri}' has been removed")
+        } else {
+            throw new RuntimeException("Given lock ID '${this.id}' does not match ID '${lockMeta.get('lockID')}' in the lock file")
+        }
+    }
+
+    private void createLockFile() {
+        this.id = UUID.randomUUID().toString()
+
+        Calendar now = Calendar.getInstance()
+        Calendar expiredAt = now.clone()
+        expiredAt.add(Calendar.SECOND, this.expiration)
+
+        Map lockMeta = [
+            'lockID': this.id,
+            'createdAt': now.getTime().toString(),
+            'expiredAt': expiredAt.getTime().toString(),
+        ]
+        lockMeta.putAll(this.lockExtraMetadata)
+
+        def commonMCP = new com.mirantis.mcp.Common()
+        artifactoryTools.restCall(this.artObj, this.fileUri, 'PUT', commonMCP.dumpYAML(lockMeta), [:], '')
+        common.infoMsg("Lock file '${this.artObj['url']}${this.fileUri}' has been created")
+    }
+
+    private void waitLockReleased() {
+        Long startTime = System.currentTimeMillis()
+        while (isLocked()) {
+            if (System.currentTimeMillis() - startTime >= timeout*1000 ) {
+                throw new TimeoutException("Execution of waitLock timed out after ${this.timeout} seconds")
+            }
+            common.infoMsg("'${this.name}' is locked. Retrying in ${this.retryInterval} seconds")
+            // Reset the cache so the file and its content are re-retrieved,
+            // otherwise we cannot detect that the file was removed on Artifactory
+            // in the middle of waiting
+            this.lockFileContentCache = null
+            sleep(this.retryInterval*1000)
+        }
+    }
+
+    private Boolean isLocked() {
+        if (!isLockFileExist()) {
+            common.infoMsg("Lock file for '${this.name}' does not exist")
+            return false
+        } else if (isLockExpired()) {
+            common.infoMsg("Lock '${this.name}' has expired")
+            return false
+        }
+        return true
+    }
+
+    private Boolean isLockFileExist() {
+        // If there is something in the file's content then it definitely exists
+        // If we don't know about the file's existence (fileNotFound == null) we assume it exists
+        return !this.lockFileContent.isEmpty() || !this.fileNotFound
+    }
+
+    private Boolean isLockExpired() {
+        if (!isLockFileExist()) {
+            return true
+        }
+        Map lockMeta = common.readYaml2(text: this.lockFileContent ?: '{}')
+        Date expirationTime = new Date(lockMeta.get('expiredAt', '01/01/1970'))
+        Date currentTime = new Date()
+        return currentTime.after(expirationTime)
+    }
+}
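A minimal usage sketch of the new Lock class; the Artifactory server id, lock name and extra metadata are hypothetical, and Artifactory.server() is the JFrog plugin step returning the ArtifactoryServer object the constructor expects:

    // hypothetical pipeline snippet guarding a shared demo environment
    def server = Artifactory.server('mcp-ci')            // hypothetical server id
    def envLock = new com.mirantis.mk.Lock(
        name: 'demo-environment-1',                      // hypothetical lock name
        artifactoryServer: server,
        retryInterval: 60,                               // poll every minute
        timeout: 30 * 60,                                // give up after 30 minutes
        lockExtraMetadata: [build: env.BUILD_URL])
    envLock.lock()
    try {
        // ... work on the shared resource ...
    } finally {
        envLock.unlock()
    }

unlock() succeeds here because lock() stores the generated lockID in the same object; removing a lock taken by another build would require force: true.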
diff --git a/src/com/mirantis/mk/Openstack.groovy b/src/com/mirantis/mk/Openstack.groovy
index 4ae71ac..0785bbe 100644
--- a/src/com/mirantis/mk/Openstack.groovy
+++ b/src/com/mirantis/mk/Openstack.groovy
@@ -64,7 +64,8 @@
         'openstacksdk<0.44.0',
         'python-octaviaclient==1.11.0',
         'python-heatclient==1.18.0',
-        'docutils==0.16'
+        'docutils==0.16',
+        'pyrsistent<0.17.1',
     ]
 
     if (version == 'kilo') {
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index e762eed..3e119f3 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -76,13 +76,15 @@
     salt.enforceStateWithTest([saltId: master, target: "I@openscap:service ${extra_tgt}", state: 'openscap', batch: batch])
 }
 
-def installFoundationInfraOnTarget(master, target, staticMgmtNet=false, extra_tgt = '') {
+def installFoundationInfraOnTarget(master, target, staticMgmtNet=false, extra_tgt = '', batch=20) {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
 
     salt.enforceState([saltId: master, target: "I@salt:master ${extra_tgt}", state: ['reclass'], failOnError: false, read_timeout: 120, retries: 2])
     salt.fullRefresh(master, target)
     salt.enforceState([saltId: master, target: target, state: ['linux.network.proxy'], failOnError: false, read_timeout: 60, retries: 2])
+    // Make sure all repositories are in place before proceeding with package installation from other states
+    salt.enforceState([saltId: master, target: target, state: ['linux.system.repo'], batch: batch, failOnError: false, read_timeout: 180, retries: 2])
     try {
         salt.enforceState([saltId: master, target: target, state: ['salt.minion.base'], failOnError: false, read_timeout: 60, retries: 2])
         sleep(5)
@@ -1180,90 +1182,23 @@
 //
 
 def installCephMon(master, target="I@ceph:mon", extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    salt.enforceState([saltId: master, target: "I@ceph:common ${extra_tgt}", state: 'salt.minion.grains'])
-
-    // generate keyrings
-    if (salt.testTarget(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}", state: 'ceph.mon'])
-        salt.runSaltProcessStep(master, "I@ceph:mon ${extra_tgt}", 'saltutil.sync_grains')
-        salt.runSaltProcessStep(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}", 'mine.update')
-
-        // on target nodes mine is used to get pillar from 'ceph:common:keyring:admin' via grain.items
-        // we need to refresh all pillar/grains to make data sharing work correctly
-        salt.fullRefresh(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}")
-
-        sleep(5)
-    }
-    // install Ceph Mons
-    salt.enforceState([saltId: master, target: target, state: 'ceph.mon'])
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:mgr ${extra_tgt}", state: 'ceph.mgr'])
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.installMon(master, target, extra_tgt)
 }
 
 def installCephOsd(master, target="I@ceph:osd", setup=true, extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    // install Ceph OSDs
-    salt.enforceState([saltId: master, target: target, state: 'ceph.osd'])
-    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'saltutil.sync_grains')
-    salt.enforceState([saltId: master, target: target, state: 'ceph.osd.custom'])
-    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'saltutil.sync_grains')
-    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'mine.update')
-    installBackup(master, 'ceph')
-
-    // setup pools, keyrings and maybe crush
-    if (salt.testTarget(master, "I@ceph:setup ${extra_tgt}") && setup) {
-        sleep(5)
-        salt.enforceState([saltId: master, target: "I@ceph:setup ${extra_tgt}", state: 'ceph.setup'])
-    }
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.installOsd(master, target, setup, extra_tgt)
 }
 
 def installCephClient(master, extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    // install Ceph Radosgw
-    if (salt.testTarget(master, "I@ceph:radosgw ${extra_tgt}")) {
-        salt.runSaltProcessStep(master, "I@ceph:radosgw ${extra_tgt}", 'saltutil.sync_grains')
-        salt.enforceState([saltId: master, target: "I@ceph:radosgw ${extra_tgt}", state: 'ceph.radosgw'])
-    }
-
-    // setup keyring for Openstack services
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@glance:server ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
-
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@cinder:controller ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
-
-    if (salt.testTarget(master, "I@ceph:common and I@nova:compute ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute ${extra_tgt}", 'saltutil.sync_grains')
-    }
-
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@gnocchi:server ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.installClient(master, extra_tgt)
 }
 
 def connectCeph(master, extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    // setup Keystone service and endpoints for swift or / and S3
-    salt.enforceStateWithTest([saltId: master, target: "I@keystone:client ${extra_tgt}", state: 'keystone.client'])
-
-    // connect Ceph to the env
-    if (salt.testTarget(master, "I@ceph:common and I@glance:server ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@glance:server ${extra_tgt}", state: ['glance']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@glance:server ${extra_tgt}", 'service.restart', ['glance-api'])
-    }
-    if (salt.testTarget(master, "I@ceph:common and I@cinder:controller ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@cinder:controller ${extra_tgt}", state: ['cinder']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@cinder:controller ${extra_tgt}", 'service.restart', ['cinder-volume'])
-    }
-    if (salt.testTarget(master, "I@ceph:common and I@nova:compute ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute ${extra_tgt}", state: ['nova']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute ${extra_tgt}", 'service.restart', ['nova-compute'])
-    }
-    if (salt.testTarget(master, "I@ceph:common and I@gnocchi:server ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server:role:primary ${extra_tgt}", state: 'gnocchi.server'])
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server ${extra_tgt}", state: 'gnocchi.server'])
-    }
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.connectOS(master, extra_tgt)
 }
 
 def installOssInfra(master, extra_tgt = '') {
diff --git a/src/com/mirantis/mk/ReleaseWorkflow.groovy b/src/com/mirantis/mk/ReleaseWorkflow.groovy
index a531d48..da17fb9 100644
--- a/src/com/mirantis/mk/ReleaseWorkflow.groovy
+++ b/src/com/mirantis/mk/ReleaseWorkflow.groovy
@@ -181,8 +181,7 @@
         commitMessage =
                 """${comment}
 
-               |${ChangeId}
-            """.stripMargin()
+               |${ChangeId}\n""".stripMargin()
 
         // Add some useful info (if it present) to commit message
         if (env.BUILD_URL) {
diff --git a/src/com/mirantis/mk/Ruby.groovy b/src/com/mirantis/mk/Ruby.groovy
index 4681d07..2e1e494 100644
--- a/src/com/mirantis/mk/Ruby.groovy
+++ b/src/com/mirantis/mk/Ruby.groovy
@@ -6,9 +6,9 @@
 
 /**
  * Ensures Ruby environment with given version (install it if necessary)
- * @param rubyVersion target ruby version (optional, default 2.2.3)
+ * @param rubyVersion target ruby version (optional, default 2.6.6)
  */
-def ensureRubyEnv(rubyVersion="2.4.1"){
+def ensureRubyEnv(rubyVersion="2.6.6"){
     if (!fileExists("/var/lib/jenkins/.rbenv/versions/${rubyVersion}/bin/ruby")){
         //XXX: patch ruby-build because debian package is quite old
         sh "git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build || git -C ~/.rbenv/plugins/ruby-build pull origin master"