Merge "Update fail logic for workflow"
diff --git a/src/com/mirantis/mcp/MCPArtifactory.groovy b/src/com/mirantis/mcp/MCPArtifactory.groovy
index 272b1bc..83b1975 100644
--- a/src/com/mirantis/mcp/MCPArtifactory.groovy
+++ b/src/com/mirantis/mcp/MCPArtifactory.groovy
@@ -139,14 +139,8 @@
  */
 def moveItem (String artifactoryURL, String sourcePath, String dstPath, boolean copy = false, boolean dryRun = false) {
     def url = "${artifactoryURL}/api/${copy ? 'copy' : 'move'}/${sourcePath}?to=/${dstPath}&dry=${dryRun ? '1' : '0'}"
-    withCredentials([
-            [$class          : 'UsernamePasswordMultiBinding',
-             credentialsId   : 'artifactory',
-             passwordVariable: 'ARTIFACTORY_PASSWORD',
-             usernameVariable: 'ARTIFACTORY_LOGIN']
-    ]) {
-        sh "bash -c \"curl -X POST -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
-    }
+    def http = new com.mirantis.mk.Http()
+    return http.doPost(url, 'artifactory')
 }
 
 /**
@@ -157,14 +151,8 @@
  */
 def deleteItem (String artifactoryURL, String itemPath) {
     def url = "${artifactoryURL}/${itemPath}"
-    withCredentials([
-            [$class          : 'UsernamePasswordMultiBinding',
-             credentialsId   : 'artifactory',
-             passwordVariable: 'ARTIFACTORY_PASSWORD',
-             usernameVariable: 'ARTIFACTORY_LOGIN']
-    ]) {
-        sh "bash -c \"curl -X DELETE -u ${ARTIFACTORY_LOGIN}:${ARTIFACTORY_PASSWORD} \'${url}\'\""
-    }
+    def http = new com.mirantis.mk.Http()
+    return http.doDelete(url, 'artifactory')
 }
 
 /**
@@ -284,6 +272,48 @@
 }
 
 /**
+ * Convert a Mirantis docker image URL/path into a Mirantis Artifactory path ready for use in API calls
+ *
+ * For example:
+ * 'docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master' -> 'docker-dev-kaas-local/mirantis/kaas/si-test/master'
+ *
+ * @param image String, Mirantis URL/path of the docker image
+ */
+def dockerImageToArtifactoryPath(String image) {
+    List imageParts = image.tokenize('/')
+    String repoName = imageParts[0].tokenize('.')[0]
+    String namespace = imageParts[1..-2].join('/')
+    String imageName = imageParts[-1].tokenize(':')[0]
+    String imageTag = imageParts[-1].tokenize(':')[1]
+
+    return [repoName, namespace, imageName, imageTag].join('/')
+}
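
A minimal usage sketch of the helper above (illustrative only; it simply restates the conversion from the docstring):

    def artifactory = new com.mirantis.mcp.MCPArtifactory()
    String path = artifactory.dockerImageToArtifactoryPath('docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master')
    // path == 'docker-dev-kaas-local/mirantis/kaas/si-test/master'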
+
+/**
+ * Copy docker image from one url to another
+ *
+ * @param srcImage String, Mirantis URL/path for docker image to copy from
+ * @param dstImage String, Mirantis URL/path for docker image to copy to
+ */
+def copyDockerImage(String srcImage, String dstImage) {
+    def artifactoryServer = Artifactory.server(env.ARTIFACTORY_SERVER ?: 'mcp-ci')
+    String srcPath = dockerImageToArtifactoryPath(srcImage)
+    String dstPath = dockerImageToArtifactoryPath(dstImage)
+
+    return moveItem(artifactoryServer.getUrl(), srcPath, dstPath, true)
+}
+
+/**
+ * Delete docker image from Mirantis Artifactory
+ *
+ * @param image String, Mirantis URL/path for docker image to delete
+ */
+def deleteDockerImage(String image) {
+    def artifactoryServer = Artifactory.server(env.ARTIFACTORY_SERVER ?: 'mcp-ci')
+
+    return deleteItem(artifactoryServer.getUrl() + '/artifactory', dockerImageToArtifactoryPath(image))
+}
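
A hedged sketch of promoting an image and then dropping the source copy; the repository names are hypothetical, and env.ARTIFACTORY_SERVER (default 'mcp-ci') must point to a configured Artifactory server:

    def artifactory = new com.mirantis.mcp.MCPArtifactory()
    // copy the dev image to a prod repo (hypothetical path), then delete the dev copy
    artifactory.copyDockerImage(
        'docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master',
        'docker-prod-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master')
    artifactory.deleteDockerImage('docker-dev-kaas-local.docker.mirantis.net/mirantis/kaas/si-test:master')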
+
+/**
  * Upload docker image to Artifactory
  *
  * @param server ArtifactoryServer, the instance of Artifactory server
@@ -522,7 +552,7 @@
             }"""
 
         artifactoryServer.upload(uploadSpec, newBuildInfo())
-        def linkUrl = "${artifactoryServer.getUrl()}/artifactory/${config.get('artifactoryRepo')}"
+        def linkUrl = "${artifactoryServer.getUrl()}/${config.get('artifactoryRepo')}"
         artifactsDescription = "Job artifacts uploaded to Artifactory: <a href=\"${linkUrl}\">${linkUrl}</a>"
     } catch (Exception e) {
         if (e =~ /no artifacts/) {
diff --git a/src/com/mirantis/mk/Ceph.groovy b/src/com/mirantis/mk/Ceph.groovy
index 8660233..72e644f 100644
--- a/src/com/mirantis/mk/Ceph.groovy
+++ b/src/com/mirantis/mk/Ceph.groovy
@@ -1,103 +1,543 @@
 package com.mirantis.mk
 
 /**
+ * Install and configure ceph clients
  *
- * Ceph functions
- *
+ * @param master        Salt connection object
+ * @param extra_tgt     Extra targets for compound
  */
+def installClient(master, extra_tgt='') {
+    def salt = new Salt()
+
+    // install Ceph Radosgw
+    installRgw(master, "I@ceph:radosgw", extra_tgt)
+
+    // setup keyring for Openstack services
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@glance:server $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@cinder:controller $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@nova:compute $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@gnocchi:server $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
+}
 
 /**
- * Ceph health check
+ * Install and configure ceph monitor on target
  *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
  */
-def waitForHealthy(master, target, flags=[], count=0, attempts=300) {
-    def common = new com.mirantis.mk.Common()
-    def salt = new com.mirantis.mk.Salt()
-    // wait for healthy cluster
-    while (count < attempts) {
-        def health = salt.cmdRun(master, target, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break
-        } else {
-            for (flag in flags) {
-                if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
-                    common.infoMsg('Cluster is healthy')
-                    return
+def installMon(master, target="I@ceph:mon", extra_tgt='') {
+    def salt = new Salt()
+
+    salt.enforceState([saltId: master, target: "$target $extra_tgt", state: 'salt.minion.grains'])
+
+    // TODO: can we re-add cmn01 with proper keyrings?
+    // generate keyrings
+    if(salt.testTarget(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt", state: 'ceph.mon'])
+        salt.runSaltProcessStep(master, "I@ceph:mon $extra_tgt", 'saltutil.sync_grains')
+        salt.runSaltProcessStep(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt", 'mine.update')
+
+        // on target nodes mine is used to get pillar from 'ceph:common:keyring:admin' via grain.items
+        // we need to refresh all pillar/grains to make data sharing work correctly
+        salt.fullRefresh(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt")
+
+        sleep(5)
+    }
+    // install Ceph Mons
+    salt.enforceState([saltId: master, target: "I@ceph:mon $extra_tgt", state: 'ceph.mon'])
+    salt.enforceStateWithTest([saltId: master, target: "I@ceph:mgr $extra_tgt", state: 'ceph.mgr'])
+
+    // update config
+    salt.enforceState([saltId: master, target: "I@ceph:common $extra_tgt", state: 'ceph.common'])
+}
+
+/**
+ * Install and configure osd daemons on target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param setup         Run 'ceph.setup' (pools, keyrings, crush) after OSD installation (optional, default true)
+ * @param extra_tgt     Extra targets for compound
+ */
+def installOsd(master, target="I@ceph:osd", setup=true, extra_tgt='') {
+    def salt = new Salt()
+    def orchestrate = new Orchestrate()
+
+    // install Ceph OSDs
+    salt.enforceState([saltId: master, target: target, state: ['linux.storage','ceph.osd']])
+    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'saltutil.sync_grains')
+    salt.enforceState([saltId: master, target: target, state: 'ceph.osd.custom'])
+    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'saltutil.sync_grains')
+    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'mine.update')
+
+    // setup pools, keyrings and maybe crush
+    if(salt.testTarget(master, "I@ceph:setup $extra_tgt") && setup) {
+        orchestrate.installBackup(master, 'ceph')
+        salt.enforceState([saltId: master, target: "I@ceph:setup $extra_tgt", state: 'ceph.setup'])
+    }
+}
+
+/**
+ * Install and configure rgw service on target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def installRgw(master, target="I@ceph:radosgw", extra_tgt='') {
+    def salt = new Salt()
+
+    if(salt.testTarget(master, "I@ceph:radosgw $extra_tgt")) {
+        salt.fullRefresh(master, "I@ceph:radosgw $extra_tgt")
+        salt.enforceState([saltId: master, target: "I@ceph:radosgw $extra_tgt", state: ['keepalived', 'haproxy', 'ceph.radosgw']])
+    }
+}
+
+/**
+ * Remove rgw daemons from target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def removeRgw(master, target, extra_tgt='') {
+    def salt = new Salt()
+
+    // TODO needs to be reviewed
+    salt.fullRefresh(master, "I@ceph:radosgw $extra_tgt")
+    salt.enforceState([saltId: master, target: "I@ceph:radosgw $extra_tgt", state: ['keepalived', 'haproxy', 'ceph.radosgw']])
+}
+
+/**
+ * Remove osd daemons from target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param osds          List of OSDs to remove
+ * @param flags         Collection of flags which are set on the cluster and should be ignored by the health check
+ * @param safeRemove    Wait for data rebalance before removing the drive
+ * @param wipeDisks     Wipe data/block partitions completely when removing them (optional, default false)
+ */
+def removeOsd(master, target, osds, flags, safeRemove=true, wipeDisks=false) {
+    def common = new Common()
+    def salt = new Salt()
+
+    // systemctl stop ceph-osd@0 && ceph osd purge 0 --yes-i-really-mean-it && umount /dev/vdc1; test -b /dev/vdc1 && dd if=/dev/zero of=/dev/vdc1 bs=1M; test -b /dev/vdc2 && dd if=/dev/zero of=/dev/vdc2 bs=1M count=100; sgdisk -d1 -d2 /dev/vdc; partprobe
+    if(osds.isEmpty()) {
+        common.warningMsg('List of OSDs was empty. No OSD is removed from cluster')
+        return
+    }
+
+    // `ceph osd out <id> <id>`
+    cmdRun(master, 'ceph osd out ' + osds.join(' '), true, true)
+
+    if(safeRemove) {
+        waitForHealthy(master, flags)
+    }
+
+    for(osd in osds) {
+        salt.runSaltProcessStep(master, target, 'service.stop', "ceph-osd@$osd", null, true)
+        cmdRun(master, "ceph osd purge $osd --yes-i-really-mean-it", true, true)
+    }
+
+    for(osd in osds) {
+        def lvm_enabled = getPillar(master, target, "ceph:osd:lvm_enabled")
+        if(lvm_enabled) {
+            // ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --destroy
+            def output = cmdRunOnTarget(master, target, "ceph-volume lvm zap --osd-id $osd --destroy >/dev/null && echo 'zaped'", false)
+            if(output == 'zaped') { continue }
+        }
+
+        common.infoMsg("Removing legacy osd.")
+        def journal_partition = ""
+        def block_db_partition = ""
+        def block_wal_partition = ""
+        def block_partition = ""
+        def data_partition = ""
+        def dataDir = "/var/lib/ceph/osd/ceph-$osd"
+        journal_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/journal_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/journal_uuid`", false)
+        block_db_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/block.db_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block.db_uuid`", false)
+        block_wal_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/block.wal_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block.wal_uuid`", false)
+        block_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/block_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block_uuid`", false)
+        data_partition = cmdRunOnTarget(master, target,
+            "test -f $dataDir/fsid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/fsid`", false)
+
+        try {
+            if(journal_partition.trim()) { removePartition(master, target, journal_partition) }
+            if(block_db_partition.trim()) { removePartition(master, target, block_db_partition) }
+            if(block_wal_partition.trim()) { removePartition(master, target, block_wal_partition) }
+            if(block_partition.trim()) { removePartition(master, target, block_partition, 'block', wipeDisks) }
+            if(data_partition.trim()) { removePartition(master, target, data_partition, 'data', wipeDisks) }
+            else { common.warningMsg("Can't find data partition for osd.$osd") }
+        }
+        catch(Exception e) {
+            // report but continue as problem on one osd could be sorted out after
+            common.errorMsg("Found some issue during cleaning partition for osd.$osd on $target")
+            common.errorMsg(e)
+            currentBuild.result = 'FAILURE'
+        }
+
+        cmdRunOnTarget(master, target, "partprobe", false)
+    }
+}
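
A usage sketch, assuming pepperEnv is an established Salt connection object and that osd.3 lives on a minion matched by 'osd001*' (both hypothetical):

    def ceph = new com.mirantis.mk.Ceph()
    // take osd.3 out, wait for rebalance while tolerating 'noout', then purge it and clean its partitions
    ceph.removeOsd(pepperEnv, 'osd001*', ['3'], ['noout'], true, false)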
+
+/**
+ * Update monitoring for target hosts
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param extra_tgt     Extra targets for compound
+ */
+def updateMonitoring(master, target="I@ceph:common", extra_tgt='') {
+    def common = new Common()
+    def salt = new Salt()
+
+    def prometheusNodes = salt.getMinions(master, "I@prometheus:server $extra_tgt")
+    if(!prometheusNodes.isEmpty()) {
+        //Collect Grains
+        salt.enforceState([saltId: master, target: "$target $extra_tgt", state: 'salt.minion.grains'])
+        salt.runSaltProcessStep(master, "$target $extra_tgt", 'saltutil.refresh_modules')
+        salt.runSaltProcessStep(master, "$target $extra_tgt", 'mine.update')
+        sleep(5)
+        salt.enforceState([saltId: master, target: "$target $extra_tgt", state: ['fluentd', 'telegraf', 'prometheus']])
+        salt.enforceState([saltId: master, target: "I@prometheus:server $extra_tgt", state: 'prometheus'])
+    }
+    else {
+        common.infoMsg('No Prometheus nodes in cluster. Nothing to do.')
+    }
+}
+
+def connectCeph(master, extra_tgt='') {
+    new Common().infoMsg("This method was renamed. Use method connectOS insead.")
+    connectOS(master, extra_tgt)
+}
+
+/**
+ * Enforce configuration and connect OpenStack clients
+ *
+ * @param master        Salt connection object
+ * @param extra_tgt     Extra targets for compound
+ */
+def connectOS(master, extra_tgt='') {
+    def salt = new Salt()
+
+    // setup Keystone service and endpoints for swift or / and S3
+    salt.enforceStateWithTest([saltId: master, target: "I@keystone:client $extra_tgt", state: 'keystone.client'])
+
+    // connect Ceph to the env
+    if(salt.testTarget(master, "I@ceph:common and I@glance:server $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@glance:server $extra_tgt", state: ['glance']])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@glance:server $extra_tgt", 'service.restart', ['glance-api'])
+    }
+    if(salt.testTarget(master, "I@ceph:common and I@cinder:controller $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@cinder:controller $extra_tgt", state: ['cinder']])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@cinder:controller $extra_tgt", 'service.restart', ['cinder-volume'])
+    }
+    if(salt.testTarget(master, "I@ceph:common and I@nova:compute $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute $extra_tgt", state: ['nova']])
+        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute $extra_tgt", 'service.restart', ['nova-compute'])
+    }
+    if(salt.testTarget(master, "I@ceph:common and I@gnocchi:server $extra_tgt")) {
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server:role:primary $extra_tgt", state: 'gnocchi.server'])
+        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server $extra_tgt", state: 'gnocchi.server'])
+    }
+}
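
Taken together, the install helpers above imply the following deployment order; a sketch assuming pepperEnv is a Salt connection object:

    def ceph = new com.mirantis.mk.Ceph()
    ceph.installMon(pepperEnv)        // monitors and managers first
    ceph.installOsd(pepperEnv)        // then OSDs, pools and keyrings (I@ceph:setup)
    ceph.installClient(pepperEnv)     // radosgw and OpenStack service keyrings
    ceph.connectOS(pepperEnv)         // finally wire glance/cinder/nova/gnocchi to the cluster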
+
+/**
+ * Remove vm from VCP
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ */
+def removeVm(master, target) {
+    def common = new Common()
+    def salt = new Salt()
+
+    def fqdn = getGrain(master, target, 'id')
+    def hostname = salt.stripDomainName(fqdn)
+    def hypervisor = getPillar(master, "I@salt:control", "salt:control:cluster:internal:node:$hostname:provider")
+
+    removeSalt(master, target)
+
+    if(hypervisor?.trim()) {
+        cmdRunOnTarget(master, hypervisor, "virsh destroy $fqdn")
+        cmdRunOnTarget(master, hypervisor, "virsh undefine $fqdn")
+    }
+    else {
+        common.errorMsg("There is no provider in pillar for $hostname")
+    }
+}
+
+/**
+ * Stop target salt minion, remove its key on master and definition in reclass
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ */
+def removeSalt(master, target) {
+    def common = new Common()
+
+    def fqdn = getGrain(master, target, 'id')
+    try {
+        cmdRunOnTarget(master, 'I@salt:master', "salt-key --include-accepted -r $fqdn -y")
+    }
+    catch(Exception e) {
+        common.warningMsg(e)
+    }
+}
+
+def deleteKeyrings(master, target, extra_tgt='') {
+    def host = getGrain(master, target, 'host')
+    def keys = cmdRun(master, "ceph auth list 2>/dev/null | grep $host", false).tokenize('\n')
+    if(keys.isEmpty()) {
+        new Common().warningMsg("Nothing to do. There is no keyring for $host")
+    }
+    for(key in keys) {
+        cmdRun(master, "ceph auth del $key")
+    }
+}
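
A hedged decommission sketch for a cluster node, with a hypothetical target name:

    def ceph = new com.mirantis.mk.Ceph()
    ceph.deleteKeyrings(pepperEnv, 'cmn03*')   // drop 'ceph auth' entries belonging to the host
    ceph.removeVm(pepperEnv, 'cmn03*')         // stop the minion, remove its salt key and destroy the VCP VM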
+
+def generateMapping(pgmap,map) {
+    def pg_new
+    def pg_old
+    for(pg in pgmap) {
+        pg_new = pg["up"].minus(pg["acting"])
+        pg_old = pg["acting"].minus(pg["up"])
+        for(int i = 0; i < pg_new.size(); i++) {
+            // def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+            def string = "ceph osd pg-upmap-items ${pg["pgid"]} ${pg_new[i]} ${pg_old[i]}"
+            map.add(string)
+        }
+    }
+}
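
To illustrate what generateMapping produces, a sketch with a single placement group whose 'up' set differs from its 'acting' set (values are made up):

    def map = []
    def pgmap = [[pgid: '1.2f', up: [3, 7], acting: [3, 5]]]
    new com.mirantis.mk.Ceph().generateMapping(pgmap, map)
    // map now contains: ['ceph osd pg-upmap-items 1.2f 7 5']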
+
+/**
+ * Run command on the first of available ceph monitors
+ *
+ * @param master        Salt connection object
+ * @param cmd           Command to run
+ * @param checkResponse Check response of command. (optional, default true)
+ * @param output        Print output (optional, default false)
+ */
+def cmdRun(master, cmd, checkResponse=true, output=false) {
+    def salt = new Salt()
+    def cmn01 = salt.getFirstMinion(master, "I@ceph:mon")
+    return salt.cmdRun(master, cmn01, cmd, checkResponse, null, output)['return'][0][cmn01]
+}
+
+/**
+ * Run command on target host
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param cmd           Command to run
+ * @param checkResponse Check response of command. (optional, default true)
+ * @param output        Print output (optional, default false)
+ */
+def cmdRunOnTarget(master, target, cmd, checkResponse=true, output=false) {
+    def salt = new Salt()
+    return salt.cmdRun(master, target, cmd, checkResponse, null, output)['return'][0].values()[0]
+}
+
+/**
+ * Get a pillar value for the first host matching the target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param pillar        Pillar to obtain
+ */
+def getPillar(master, target, pillar) {
+    def common = new Common()
+    def salt = new Salt()
+    try {
+        return salt.getPillar(master, target, pillar)['return'][0].values()[0]
+    }
+    catch(Exception e) {
+        common.warningMsg('There was no pillar for the target.')
+    }
+}
+
+/**
+ * Get a grain value for the first host matching the target
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param grain         Grain to obtain
+ */
+def getGrain(master, target, grain) {
+    def common = new Common()
+    def salt = new Salt()
+    try {
+        return salt.getGrain(master, target, grain)['return'][0].values()[0].values()[0]
+    }
+    catch(Exception e) {
+        common.warningMsg('There was no grain for the target.')
+    }
+}
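
A few hedged call examples for the query helpers above (pepperEnv and the target are hypothetical):

    def ceph = new com.mirantis.mk.Ceph()
    def status = ceph.cmdRun(pepperEnv, 'ceph -s', true, true)                     // runs on the first available monitor
    def lvmEnabled = ceph.getPillar(pepperEnv, 'osd001*', 'ceph:osd:lvm_enabled')  // pillar of the first matching host
    def fqdn = ceph.getGrain(pepperEnv, 'osd001*', 'id')                           // grain of the first matching host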
+
+/**
+ * Set flags
+ *
+ * @param master        Salt connection object
+ * @param flags         Collection of flags to set
+ */
+def setFlags(master, flags) {
+    if(flags instanceof String) { flags = [flags] }
+    for(flag in flags) {
+        cmdRun(master, 'ceph osd set ' + flag)
+    }
+}
+
+/**
+ * Unset flags
+ *
+ * @param master        Salt connection object
+ * @param flags         Collection of flags to unset (optional)
+ */
+def unsetFlags(master, flags=[]) {
+    if(flags instanceof String) { flags = [flags] }
+    for(flag in flags) {
+        cmdRun(master, 'ceph osd unset ' + flag)
+    }
+}
+
+/**
+ * Wait for healthy cluster while ignoring flags which have been set
+ *
+ * @param master        Salt connection object
+ * @param flags         Collection of flags which are expected to be set and should not be treated as unhealthy
+ * @param attempts      Attempts before it pauses execution (optional, default 300)
+ */
+def waitForHealthy(master, flags, attempts=300) {
+    def common = new Common()
+
+    def count = 0
+    def isHealthy = false
+    def health = ''
+
+    // wait for current ops will be reflected in status
+    sleep(5)
+
+    while(count++ < attempts) {
+        health = cmdRun(master, 'ceph health', false)
+        if(health == 'HEALTH_OK') { return }
+        else {
+            // HEALTH_WARN noout,norebalance flag(s) set
+            def unexpectedFlags = health.tokenize(' ').getAt(1)?.tokenize(',') ?: []
+            unexpectedFlags.removeAll(flags)
+            if(health.contains('HEALTH_WARN') && unexpectedFlags.isEmpty()) { return }
+        }
+        common.warningMsg("Ceph cluster is still unhealthy: $health")
+        sleep(10)
+    }
+    // TODO: MissingMethodException
+    input message: "After ${count} attempts cluster is still unhealthy."
+    //throw new RuntimeException("After ${count} attempts cluster is still unhealthy. Can't proceed")
+}
+def waitForHealthy(master, String host, flags, attempts=300) {
+    new Common().warningMsg('This method will be deprecated.')
+    waitForHealthy(master, flags, attempts)
+}
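
A maintenance-window sketch combining setFlags, waitForHealthy and unsetFlags (the disruptive step in the middle is just a placeholder):

    def ceph = new com.mirantis.mk.Ceph()
    def flags = ['noout', 'norebalance']
    ceph.setFlags(pepperEnv, flags)
    // ... disruptive work, e.g. restarting OSD hosts ...
    ceph.waitForHealthy(pepperEnv, flags)   // HEALTH_WARN caused only by these flags is accepted
    ceph.unsetFlags(pepperEnv, flags)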
+
+/**
+ * Remove unused orphan partitions left over after removed OSDs
+ *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param wipePartitions     Wipe each found partition completely (optional, default false)
+ */
+def removeOrphans(master, target, wipePartitions=false) {
+    def common = new Common()
+    def salt = new Salt()
+
+    def orphans = []
+    // TODO: ceph-disk is available only in luminous
+    def disks = cmdRunOnTarget(master, target, "ceph-disk list --format json 2>/dev/null",false)
+    disks = "{\"disks\":$disks}" // common.parseJSON() can't parse a list of maps
+    disks = common.parseJSON(disks)['disks']
+    for(disk in disks) {
+        for(partition in disk.get('partitions')) {
+            def orphan = false
+            if(partition.get('type') == 'block.db' && !partition.containsKey('block.db_for')) { orphan = true }
+            else if(partition.get('type') == 'block' && !partition.containsKey('block_for')) { orphan = true }
+            else if(partition.get('type') == 'data' && partition.get('state') != 'active') { orphan = true }
+            // TODO: test for the rest of types
+
+            if(orphan) {
+                if(partition.get('path')) {
+                    removePartition(master, target, partition['path'], partition['type'], wipePartitions)
+                }
+                else {
+                    common.warningMsg("Found orphan partition on $target but failed to remove it.")
                 }
             }
         }
-        common.infoMsg("Ceph health status: ${health}")
-        count++
-        sleep(10)
     }
+    cmdRunOnTarget(master, target, "partprobe", false)
 }
 
 /**
  * Ceph remove partition
  *
+ * @param master        Salt connection object
+ * @param target        Target specification, compliance to compound matcher in salt
+ * @param partition     Partition to remove on target host
+ * @param type          Type of partition. Some partitions need additional steps (optional, default empty string)
+ * @param fullWipe      Fill the entire partition with zeros (optional, default false)
  */
-def removePartition(master, target, partition_uuid, type='', id=-1) {
-    def salt = new com.mirantis.mk.Salt()
-    def common = new com.mirantis.mk.Common()
-    def partition = ''
+def removePartition(master, target, partition, type='', fullWipe=false) {
+    def common = new Common()
+    def salt = new Salt()
+
     def dev = ''
     def part_id = ''
-    def lvm_enabled = salt.getPillar(master, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true)
-    if ( !lvm_enabled ){
-        if (type == 'lockbox') {
-            try {
-                // umount - partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
-                salt.cmdRun(master, target, "umount ${partition}")
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-        } else if (type == 'data') {
-            try {
-                // umount - partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
-                salt.cmdRun(master, target, "umount ${partition}")
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-            try {
-                // partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-        } else {
-            try {
-                // partition = /dev/sdi2
-                partition = salt.cmdRun(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-        }
-        if (partition?.trim()) {
-            if (partition.contains("nvme")) {
-                // partition = /dev/nvme1n1p2
-                // dev = /dev/nvme1n1
-                dev = partition.replaceAll('p\\d+$', "")
-                // part_id = 2
-                part_id = partition.substring(partition.lastIndexOf("p") + 1).replaceAll("[^0-9]+", "")
+    def partitionID = ''
+    def disk = ''
+    def wipeCmd = ''
+    def lvm_enabled = getPillar(master, target, "ceph:osd:lvm_enabled")
 
-            } else {
-                // partition = /dev/sdi2
-                // dev = /dev/sdi
-                dev = partition.replaceAll('\\d+$', "")
-                // part_id = 2
-                part_id = partition.substring(partition.lastIndexOf("/") + 1).replaceAll("[^0-9]+", "")
-            }
+    if(!partition?.trim()) {
+        throw new Exception("Can't proceed without defined partition.")
+    }
+    cmdRunOnTarget(master, target, "test -b $partition")
+
+    if(fullWipe) { wipeCmd = "dd if=/dev/zero of=$partition bs=1M 2>/dev/null" }
+    else { wipeCmd = "dd if=/dev/zero of=$partition bs=1M count=100 2>/dev/null" }
+
+    common.infoMsg("Removing from the cluster $type partition $partition on $target.")
+    if(type == 'lockbox') {
+        try {
+            partition = cmdRunOnTarget(master, target, "lsblk -rp | grep -v mapper | grep $partition", false)
+            cmdRunOnTarget(master, target, "umount $partition")
+        }
+        catch (Exception e) {
+            common.warningMsg(e)
         }
     }
-    if (lvm_enabled && type != 'lockbox') {
-        salt.cmdRun(master, target, "ceph-volume lvm zap /dev/disk/by-partuuid/${partition_uuid} --destroy")
-    } else if (dev != '') {
-        salt.cmdRun(master, target, "parted ${dev} rm ${part_id}")
-    } else {
-        common.infoMsg("Did not found any device to be wiped.")
+    else if(type == 'data') {
+        cmdRunOnTarget(master, target, "umount $partition 2>/dev/null", false)
+        cmdRunOnTarget(master, target, wipeCmd, false)
     }
-    return
+    else if(type == 'block' || fullWipe) {
+        cmdRunOnTarget(master, target, wipeCmd, false)
+    }
+    try {
+        partitionID = cmdRunOnTarget(master, target, "cat /sys/dev/block/`lsblk $partition -no MAJ:MIN | xargs`/partition", false)
+        disk = cmdRunOnTarget(master, target, "lsblk $partition -no pkname", false)
+    }
+    catch (Exception e) {
+        common.errorMsg("Couldn't get disk name or partition number for $partition")
+        common.warningMsg(e)
+    }
+    try {
+        cmdRunOnTarget(master, target, "sgdisk -d$partitionID /dev/$disk", true, true)
+    }
+    catch (Exception e) {
+        common.warningMsg("Did not found any device to be wiped.")
+        common.warningMsg(e)
+    }
+    // try to remove the partition table if the disk has no partitions left - required by ceph-volume
+    cmdRunOnTarget(master, target, "partprobe -d -s /dev/$disk | grep partitions\$ && sgdisk -Z /dev/$disk", false, true)
 }
diff --git a/src/com/mirantis/mk/Common.groovy b/src/com/mirantis/mk/Common.groovy
index 448935f..8f2474d 100644
--- a/src/com/mirantis/mk/Common.groovy
+++ b/src/com/mirantis/mk/Common.groovy
@@ -1192,3 +1192,22 @@
         }
     }
 }
+
+/**
+ * Save metadata about results in a yaml formatted file
+ * @param success (boolean) whether the run was successful or not
+ * @param code (string) generalized code identifying the error
+ * @param msg (string) user-friendly message about what happened (error details)
+ * @param extraMeta (map) additional data to be added to the resulting yaml file through this map
+ * @param dstFile (string) name of yaml file to store the data
+*/
+def saveMetaResult(Boolean success, String code, String msg, Map extraMeta = [:], String dstFile = 'result.yaml') {
+    Map result = extraMeta.clone()
+    result.putAll([
+        'success': success,
+        'code': code,
+        'message': msg,
+    ])
+    writeYaml file: dstFile, data: result, overwrite: true
+    archiveArtifacts artifacts: dstFile
+}
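
A usage sketch of the new helper; the code value and extra keys are hypothetical:

    def common = new com.mirantis.mk.Common()
    common.saveMetaResult(false, 'DEPLOY_TIMEOUT', 'Deployment did not finish in time',
        [job: env.JOB_NAME, build: env.BUILD_NUMBER], 'result.yaml')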
diff --git a/src/com/mirantis/mk/DockerImageScanner.groovy b/src/com/mirantis/mk/DockerImageScanner.groovy
index 8178564..64817a8 100644
--- a/src/com/mirantis/mk/DockerImageScanner.groovy
+++ b/src/com/mirantis/mk/DockerImageScanner.groovy
@@ -43,7 +43,7 @@
         case ~/^ceph\/.*$/:
             team_assignee = 'Storage'
             break
-        case ~/^iam\/.*$/:
+        case ~/^(core|iam)\/.*$/:
             team_assignee = 'KaaS'
             break
         case ~/^lcm\/.*$/:
@@ -227,6 +227,8 @@
                 return
             } else if (image_key.startsWith('mirantis/ucp') || image_key.startsWith('mirantiseng/ucp')) {
                 jiraNamespace = 'ENGORC'
+            } else if (image_key.startsWith('mirantis/dtr') || image_key.startsWith('mirantiseng/dtr')) {
+                jiraNamespace = 'ENGDTR'
             } else {
                 jiraNamespace = 'PRODX'
             }
diff --git a/src/com/mirantis/mk/Helm.groovy b/src/com/mirantis/mk/Helm.groovy
index 61ff48a..ad262e0 100644
--- a/src/com/mirantis/mk/Helm.groovy
+++ b/src/com/mirantis/mk/Helm.groovy
@@ -18,17 +18,22 @@
 
 /**
  * Rebuild index file for helm chart repo
- * @param helmRepoUrl  repository with helm charts
- * @param md5Remote    md5 sum of index.yaml for check
+ * @param helmRepoUrl    repository with helm charts
+ * @param md5Remote      md5 sum of index.yaml for check
+ * @param absHelmRepoUrl if set to true, URLs to charts will be absolute
  */
 
-def helmMergeRepoIndex(helmRepoUrl, md5Remote='') {
+def helmMergeRepoIndex(helmRepoUrl, md5Remote='', absHelmRepoUrl=true) {
     def common      = new com.mirantis.mk.Common()
 
     def helmRepoDir    = '.'
-    def helmExtraParams = "--url ${helmRepoUrl}"
+    def helmExtraParams = ''
+    if (absHelmRepoUrl) {
+        helmExtraParams = "--url ${helmRepoUrl}"
+    }
 
     def indexRes = common.shCmdStatus("wget -O index-upstream.yaml ${helmRepoUrl}/index.yaml")
+
     if (indexRes['status']){
         if (indexRes['status'] == 8 && indexRes['stderr'].contains('ERROR 404') && !md5Remote) {
             common.warningMsg("Index.yaml not found in ${helmRepoUrl} and will be fully regenerated")
diff --git a/src/com/mirantis/mk/JenkinsUtils.groovy b/src/com/mirantis/mk/JenkinsUtils.groovy
index b760812..a9c5d0c 100644
--- a/src/com/mirantis/mk/JenkinsUtils.groovy
+++ b/src/com/mirantis/mk/JenkinsUtils.groovy
@@ -277,3 +277,30 @@
     }
     return [status: true, log: '', jobs: depList]
 }
+
+/**
+ *  Return jenkins infra metadata according to the specified jenkins instance
+ *
+ * @param jenkinsServerURL  (string) URL to jenkins server in form: env.JENKINS_URL
+ * @return                  (map)[
+ *                              jenkins_service_user: (string) name of jenkins user needed for gerrit ops
+ *                             ]
+ */
+def getJenkinsInfraMetadata(jenkinsServerURL) {
+    def meta = [
+        jenkins_service_user: '',
+    ]
+
+    switch (jenkinsServerURL) {
+        case 'https://ci.mcp.mirantis.net/':
+            meta['jenkins_service_user'] = 'mcp-jenkins'
+            break
+        case 'https://mcc-ci.infra.mirantis.net/':
+            meta['jenkins_service_user'] = 'mcc-ci-jenkins'
+            break
+        default:
+            error("Failed to detect jenkins service user, supported jenkins platforms: 'https://ci.mcp.mirantis.net/' 'https://mcc-ci.infra.mirantis.net/'")
+    }
+
+    return meta
+}
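
A usage sketch; env.JENKINS_URL must match one of the two supported instances or the call errors out:

    def jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
    def infraMeta = jenkinsUtils.getJenkinsInfraMetadata(env.JENKINS_URL)
    def gerritUser = infraMeta['jenkins_service_user']   // e.g. 'mcp-jenkins' for https://ci.mcp.mirantis.net/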
diff --git a/src/com/mirantis/mk/KaasUtils.groovy b/src/com/mirantis/mk/KaasUtils.groovy
index 5936ec8..40aa2b0 100644
--- a/src/com/mirantis/mk/KaasUtils.groovy
+++ b/src/com/mirantis/mk/KaasUtils.groovy
@@ -32,7 +32,7 @@
 
 /**
  * Determine scope of test suite against per-commit KaaS deployment based on keywords
- * Keyword list: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/
+ * Keyword list: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md
  *
  * Used for components team to combine test-suites and forward desired parameters to kaas/core deployment jobs
  * Example scheme:
@@ -65,6 +65,11 @@
         regionLocation: '',
     ]
 
+    // proxy customization
+    def proxyConfig = [
+        mgmtOffline: env.OFFLINE_MGMT_CLUSTER ? env.OFFLINE_MGMT_CLUSTER.toBoolean() : false, // TODO(vnaumov) add additional vars for regional/child cluster ops
+    ]
+
     // optional demo deployment customization
     def awsOnDemandDemo = env.ALLOW_AWS_ON_DEMAND ? env.ALLOW_AWS_ON_DEMAND.toBoolean() : false
     def awsOnRhelDemo = false
@@ -74,6 +79,10 @@
     def enableBMDemo = true
 
     def commitMsg = env.GERRIT_CHANGE_COMMIT_MESSAGE ? new String(env.GERRIT_CHANGE_COMMIT_MESSAGE.decodeBase64()) : ''
+    if (commitMsg ==~ /(?s).*\[mgmt-proxy\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*mgmt-proxy.*/) {
+        proxyConfig['mgmtOffline'] = true
+        common.warningMsg('Forcing offline mgmt deployment; all provider CDN regions for mgmt deployment will be set to *public-ci* to verify proxy configuration')
+    }
     if (commitMsg ==~ /(?s).*\[seed-macos\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*seed-macos.*/) {
         seedMacOs = true
     }
@@ -91,12 +100,8 @@
         attachBYO = true
         upgradeBYO = true
     }
-    if (commitMsg ==~ /(?s).*\[mgmt-upgrade\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*mgmt-upgrade.*/ || upgradeBYO) {
+    if (commitMsg ==~ /(?s).*\[mgmt-upgrade\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*mgmt-upgrade.*/) {
         upgradeMgmt = true
-        if (upgradeBYO) {
-            // TODO (vnaumov) remove such dependency right after we can verify byo upgrade w/o mgmt upgrade
-            common.warningMsg('Forced running kaas mgmt upgrade scenario, due byo demo scenario trigger: \'[byo-upgrade]\' ')
-        }
     }
     if (commitMsg ==~ /(?s).*\[ui-e2e\].*/ || env.GERRIT_EVENT_COMMENT_TEXT ==~ /(?s).*ui-e2e.*/) {
         runUie2e = true
@@ -177,10 +182,21 @@
             break
     }
 
+    // CDN configuration
+    def cdnConfig = [
+        mgmt: [
+            openstack:  (proxyConfig['mgmtOffline'] == true) ? 'public-ci' : 'internal-ci',
+            vsphere:  (proxyConfig['mgmtOffline'] == true) ? 'public-ci' : 'internal-ci',
+            aws: 'public-ci',
+        ], // TODO(vnaumov) add additional vars for regional cluster cdn ops
+    ]
+
     // calculate weight of current demo run to manage lockable resources
     def demoWeight = (deployChild) ? 2 : 1 // management = 1, child = 1
 
     common.infoMsg("""
+        CDN deployment configuration: ${cdnConfig}
+        MCC offline deployment configuration: ${proxyConfig}
         Use MacOS node as seed: ${seedMacOs}
         Child cluster deployment scheduled: ${deployChild}
         Child cluster release upgrade scheduled: ${upgradeChild}
@@ -199,8 +215,10 @@
         Multiregional configuration: ${multiregionalMappings}
         Service binaries fetching scheduled: ${fetchServiceBinaries}
         Current weight of the demo run: ${demoWeight} (Used to manage lockable resources)
-        Triggers: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/""")
+        Triggers: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md""")
     return [
+        cdnConfig                  : cdnConfig,
+        proxyConfig                : proxyConfig,
         useMacOsSeedNode           : seedMacOs,
         deployChildEnabled         : deployChild,
         upgradeChildEnabled        : upgradeChild,
@@ -269,7 +287,7 @@
 /**
  * Determine if custom si tests/pipelines refspec forwarded from gerrit change request
 
- * Keyword list: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/
+ * Keyword list: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md
  * Used for components team to test component changes w/ custom SI refspecs using kaas/core deployment jobs
  * Example scheme:
  * New CR pushed in kubernetes/lcm-ansible -> parsing it's commit body and get custom test refspecs -> trigger deployment jobs from kaas/core
@@ -305,7 +323,7 @@
         kaas/si-pipelines will be fetched from: ${siPipelinesRefspec}
         kaas/si-tests will be fetched from: ${siTestsRefspec}
         kaas/si-tests as dockerImage will be fetched from: ${siTestsDockerImage}:${siTestsDockerImageTag}
-        Keywords: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/""")
+        Keywords: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md""")
     return [siTests: siTestsRefspec, siPipelines: siPipelinesRefspec, siTestsDockerImage: siTestsDockerImage, siTestsDockerImageTag: siTestsDockerImageTag]
 }
 
@@ -342,7 +360,7 @@
 /**
  * Determine if custom kaas core/pipelines refspec forwarded from gerrit change request
 
- * Keyword list: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/
+ * Keyword list: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md
  * Used for components team to test component changes w/ custom Core refspecs using kaas/core deployment jobs
  * Example scheme:
  * New CR pushed in kubernetes/lcm-ansible -> parsing it's commit body and get custom test refspecs -> trigger deployment jobs from kaas/core
@@ -374,7 +392,7 @@
     common.infoMsg("""
         kaas/core will be fetched from: ${coreRefspec}
         kaas/core pipelines will be fetched from: ${corePipelinesRefspec}
-        Keywords: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/""")
+        Keywords: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md""")
     return [core: coreRefspec, corePipelines: corePipelinesRefspec]
 }
 
@@ -402,7 +420,7 @@
 
 /**
  * Trigger KaaS demo jobs based on AWS/OS providers with customized test suite, parsed from external sources (gerrit commit/jj vars)
- * Keyword list: https://docs.google.com/document/d/1SSPD8ZdljbqmNl_FEAvTHUTow9Ki8NIMu82IcAVhzXw/
+ * Keyword list: https://gerrit.mcp.mirantis.com/plugins/gitiles/kaas/core/+/refs/heads/master/hack/ci-gerrit-keywords.md
  * Used for components team to test component changes w/ customized SI tests/refspecs using kaas/core deployment jobs
  *
  * @param:        component (string) component name [iam, lcm, stacklight]
@@ -441,6 +459,7 @@
         string(name: 'SI_TESTS_DOCKER_IMAGE_TAG', value: siRefspec.siTestsDockerImageTag),
         string(name: 'SI_PIPELINES_REFSPEC', value: siRefspec.siPipelines),
         string(name: 'CUSTOM_RELEASE_PATCH_SPEC', value: patchSpec),
+        booleanParam(name: 'OFFLINE_MGMT_CLUSTER', value: triggers.proxyConfig['mgmtOffline']),
         booleanParam(name: 'SEED_MACOS', value: triggers.useMacOsSeedNode),
         booleanParam(name: 'UPGRADE_MGMT_CLUSTER', value: triggers.upgradeMgmtEnabled),
         booleanParam(name: 'RUN_UI_E2E', value: triggers.runUie2eEnabled),
@@ -469,19 +488,21 @@
     }
 
     def jobResults = []
-    jobs["kaas-core-openstack-patched-${component}"] = {
-        try {
-            common.infoMsg('Deploy: patched KaaS demo with Openstack provider')
-            os_job_info = build job: "kaas-testing-core-openstack-workflow-${component}", parameters: parameters, wait: true
-            def build_description = os_job_info.getDescription()
-            def build_result = os_job_info.getResult()
-            jobResults.add(build_result)
+    if (triggers.osDemoEnabled) {
+        jobs["kaas-core-openstack-patched-${component}"] = {
+            try {
+                common.infoMsg('Deploy: patched KaaS demo with Openstack provider')
+                os_job_info = build job: "kaas-testing-core-openstack-workflow-${component}", parameters: parameters, wait: true
+                def build_description = os_job_info.getDescription()
+                def build_result = os_job_info.getResult()
+                jobResults.add(build_result)
 
-            if (build_description) {
-                currentBuild.description += build_description
+                if (build_description) {
+                    currentBuild.description += build_description
+                }
+            } finally {
+                common.infoMsg('Patched KaaS demo with Openstack provider finished')
             }
-        } finally {
-            common.infoMsg('Patched KaaS demo with Openstack provider finished')
         }
     }
     if (triggers.awsOnDemandDemoEnabled) {
@@ -530,6 +551,9 @@
     }
 
     common.infoMsg('Trigger KaaS demo deployments according to defined provider set')
+    if (jobs.size() == 0) {
+        error('No demo jobs matched with keywords, execution will be aborted, at least 1 provider should be enabled')
+    }
     // Limit build concurency workaround examples: https://issues.jenkins-ci.org/browse/JENKINS-44085
     parallel jobs
 
diff --git a/src/com/mirantis/mk/Lock.groovy b/src/com/mirantis/mk/Lock.groovy
index d72ceed..6b34dbe 100644
--- a/src/com/mirantis/mk/Lock.groovy
+++ b/src/com/mirantis/mk/Lock.groovy
@@ -1,9 +1,10 @@
 import org.jfrog.hudson.pipeline.common.types.ArtifactoryServer
 import java.util.concurrent.TimeoutException
+import groovy.time.TimeCategory
 
 class Lock {
     String  name, id, path
-    Integer retryInterval, timeout, expiration
+    String retryInterval, timeout, expiration
     Boolean force
     Map lockExtraMetadata
     ArtifactoryServer artifactoryServer
@@ -24,13 +25,13 @@
         this.artifactoryServer = args.artifactoryServer
 
         // Defaults
-        this.id = args.get('id', '')
-        this.path = args.get('path', 'binary-dev-local/locks')
-        this.retryInterval = args.get('retryInterval', 5*60)
-        this.timeout = args.get('timeout', 3*60*60)
-        this.expiration = args.get('expiration', 24*60*60)
-        this.force = args.get('force', false)
-        this.lockExtraMetadata = args.get('lockExtraMetadata', [:])
+        this.id = args.getOrDefault('id', '')
+        this.path = args.getOrDefault('path', 'binary-dev-local/locks')
+        this.retryInterval = args.getOrDefault('retryInterval', '5m')
+        this.timeout = args.getOrDefault('timeout', '3h')
+        this.expiration = args.getOrDefault('expiration', '24h')
+        this.force = args.getOrDefault('force', false)
+        this.lockExtraMetadata = args.getOrDefault('lockExtraMetadata', [:])
 
         // Internal
         this.fileUri = "/${path}/${name}.yaml".toLowerCase()
@@ -84,7 +85,7 @@
         }
 
         Map lockMeta = common.readYaml2(text: this.lockFileContent ?: '{}')
-        if (this.force || (this.id && this.id == lockMeta.get('lockID', ''))) {
+        if (this.force || (this.id && this.id == lockMeta.getOrDefault('lockID', ''))) {
             artifactoryTools.restCall(this.artObj, this.fileUri, 'DELETE', null, [:], '')
             common.infoMsg("Lock file '${this.artObj['url']}${this.fileUri}' has been removed")
         } else {
@@ -95,14 +96,12 @@
     private void createLockFile() {
         this.id = UUID.randomUUID().toString()
 
-        Calendar now = Calendar.getInstance()
-        Calendar expiredAt = now.clone()
-        expiredAt.add(Calendar.SECOND, this.expiration)
-
+        Date now = new Date()
+        Date expiredAt = TimeCategory.plus(now, common.getDuration(this.expiration))
         Map lockMeta = [
             'lockID': this.id,
-            'createdAt': now.getTime().toString(),
-            'expiredAt': expiredAt.getTime().toString(),
+            'createdAt': now.toString(),
+            'expiredAt': expiredAt.toString(),
         ]
         lockMeta.putAll(this.lockExtraMetadata)
 
@@ -113,16 +112,18 @@
 
     private void waitLockReleased() {
         Long startTime = System.currentTimeMillis()
+        Long timeoutMillis = common.getDuration(this.timeout).toMilliseconds()
+        Long retryIntervalMillis = common.getDuration(this.retryInterval).toMilliseconds()
         while (isLocked()) {
-            if (System.currentTimeMillis() - startTime >= timeout*1000 ) {
-                throw new TimeoutException("Execution of waitLock timed out after ${this.timeout} seconds")
+            if (System.currentTimeMillis() - startTime >= timeoutMillis) {
+                throw new TimeoutException("Execution of waitLock timed out after ${this.timeout}")
             }
-            common.infoMsg("'${this.name}' is locked. Retry in ${this.retryInterval} seconds")
+            common.infoMsg("'${this.name}' is locked. Retry in ${this.retryInterval}")
             // Reset the cache so it will re-retrieve the file and its content
             // otherwise it cannot determine that file has been removed on artifactory
             // in the middle of waiting
             this.lockFileContentCache = null
-            sleep(this.retryInterval*1000)
+            sleep(retryIntervalMillis)
         }
     }
 
@@ -148,7 +149,7 @@
             return true
         }
         Map lockMeta = common.readYaml2(text: this.lockFileContent ?: '{}')
-        Date expirationTime = new Date(lockMeta.get('expiredAt', '01/01/1970'))
+        Date expirationTime = new Date(lockMeta.getOrDefault('expiredAt', '01/01/1970'))
         Date currentTime = new Date()
         return currentTime.after(expirationTime)
     }
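
With the switch to duration strings, a Lock would now be constructed roughly as below; the lock name and the lock()/unlock() entry points are assumptions, only the fields and their defaults come from this diff:

    def lock = new com.mirantis.mk.Lock(
        name: 'kaas-mgmt-deploy',                        // hypothetical lock name
        artifactoryServer: Artifactory.server('mcp-ci'),
        retryInterval: '2m',                             // was an Integer number of seconds before this change
        timeout: '1h',
        expiration: '12h',
        lockExtraMetadata: [job: env.JOB_NAME]
    )
    lock.lock()      // assumed public entry point, not shown in this hunk
    try {
        // ... critical section ...
    } finally {
        lock.unlock()
    }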
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index ec1c5d1..3e119f3 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -1182,90 +1182,23 @@
 //
 
 def installCephMon(master, target="I@ceph:mon", extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    salt.enforceState([saltId: master, target: "I@ceph:common ${extra_tgt}", state: 'salt.minion.grains'])
-
-    // generate keyrings
-    if (salt.testTarget(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}", state: 'ceph.mon'])
-        salt.runSaltProcessStep(master, "I@ceph:mon ${extra_tgt}", 'saltutil.sync_grains')
-        salt.runSaltProcessStep(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}", 'mine.update')
-
-        // on target nodes mine is used to get pillar from 'ceph:common:keyring:admin' via grain.items
-        // we need to refresh all pillar/grains to make data sharing work correctly
-        salt.fullRefresh(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) ${extra_tgt}")
-
-        sleep(5)
-    }
-    // install Ceph Mons
-    salt.enforceState([saltId: master, target: target, state: 'ceph.mon'])
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:mgr ${extra_tgt}", state: 'ceph.mgr'])
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.installMon(master, target, extra_tgt)
 }
 
 def installCephOsd(master, target="I@ceph:osd", setup=true, extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    // install Ceph OSDs
-    salt.enforceState([saltId: master, target: target, state: 'ceph.osd'])
-    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'saltutil.sync_grains')
-    salt.enforceState([saltId: master, target: target, state: 'ceph.osd.custom'])
-    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'saltutil.sync_grains')
-    salt.runSaltProcessStep(master, "I@ceph:osd ${extra_tgt}", 'mine.update')
-    installBackup(master, 'ceph')
-
-    // setup pools, keyrings and maybe crush
-    if (salt.testTarget(master, "I@ceph:setup ${extra_tgt}") && setup) {
-        sleep(5)
-        salt.enforceState([saltId: master, target: "I@ceph:setup ${extra_tgt}", state: 'ceph.setup'])
-    }
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.installOsd(master, target, setup, extra_tgt)
 }
 
 def installCephClient(master, extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    // install Ceph Radosgw
-    if (salt.testTarget(master, "I@ceph:radosgw ${extra_tgt}")) {
-        salt.runSaltProcessStep(master, "I@ceph:radosgw ${extra_tgt}", 'saltutil.sync_grains')
-        salt.enforceState([saltId: master, target: "I@ceph:radosgw ${extra_tgt}", state: 'ceph.radosgw'])
-    }
-
-    // setup keyring for Openstack services
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@glance:server ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
-
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@cinder:controller ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
-
-    if (salt.testTarget(master, "I@ceph:common and I@nova:compute ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute ${extra_tgt}", 'saltutil.sync_grains')
-    }
-
-    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@gnocchi:server ${extra_tgt}", state: ['ceph.common', 'ceph.setup.keyring']])
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.installClient(master, extra_tgt)
 }
 
 def connectCeph(master, extra_tgt = '') {
-    def salt = new com.mirantis.mk.Salt()
-
-    // setup Keystone service and endpoints for swift or / and S3
-    salt.enforceStateWithTest([saltId: master, target: "I@keystone:client ${extra_tgt}", state: 'keystone.client'])
-
-    // connect Ceph to the env
-    if (salt.testTarget(master, "I@ceph:common and I@glance:server ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@glance:server ${extra_tgt}", state: ['glance']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@glance:server ${extra_tgt}", 'service.restart', ['glance-api'])
-    }
-    if (salt.testTarget(master, "I@ceph:common and I@cinder:controller ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@cinder:controller ${extra_tgt}", state: ['cinder']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@cinder:controller ${extra_tgt}", 'service.restart', ['cinder-volume'])
-    }
-    if (salt.testTarget(master, "I@ceph:common and I@nova:compute ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute ${extra_tgt}", state: ['nova']])
-        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute ${extra_tgt}", 'service.restart', ['nova-compute'])
-    }
-    if (salt.testTarget(master, "I@ceph:common and I@gnocchi:server ${extra_tgt}")) {
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server:role:primary ${extra_tgt}", state: 'gnocchi.server'])
-        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server ${extra_tgt}", state: 'gnocchi.server'])
-    }
+    def ceph = new com.mirantis.mk.Ceph()
+    ceph.connectOS(master, extra_tgt)
 }
 
 def installOssInfra(master, extra_tgt = '') {