Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0

bfd8101 Fix for proper parameter parsing in pkg.purge
d0ddb56 Fix condition - include empty string in the condition
646cd72 Remove extra_tgt parameter
522d9f9 Move salt formulas test to xenial jenkins node
d085e51 Rewrite ceph pipelines for adding and removing nodes
a5594de Add ask_confirmation parameter for ceph-upgrade pipeline

Change-Id: I851cea3ba86c6a1c929d6344fe047c0b5590168a
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 294f1ed..9ef1f88 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -6,92 +6,146 @@
  *  SALT_MASTER_URL             URL of Salt master
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
  *  HOST                        Host (minion id) to be added
- *  HOST_TYPE                   Type of Ceph node to be added. Valid values are mon/osd/rgw
+ *  CLUSTER_FLAGS               Expected flags on the cluster during job run
+ *  OSD_ONLY                    Add only new OSDs while keeping the rest intact
+ *  USE_UPMAP                   Use upmap to rebalance data after the node is added
  *
  */
 
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-orchestrate = new com.mirantis.mk.Orchestrate()
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
+def orchestrate = new com.mirantis.mk.Orchestrate()
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+def flags = CLUSTER_FLAGS.tokenize(',').toSet()
+def osdOnly = OSD_ONLY.toBoolean()
+def useUpmap = USE_UPMAP.toBoolean()
+
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
 
         // create connection to salt master
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-        matches = ["osd", "mon", "rgw"]
-        def found = false
-        for (s in matches) {
-            if (HOST_TYPE.toLowerCase() == s) {
-                found = true
-            }
-        }
-
-        if (!found) {
-            common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
-            throw new InterruptedException()
-        }
-
-        def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
-        if (checknode['return'][0].values().isEmpty()) {
+        def target = salt.getMinions(pepperEnv, HOST)
+        if(target.isEmpty()) {
             common.errorMsg("Host not found")
             throw new InterruptedException()
         }
+        else if(target.size() > 1) {
+            common.warningMsg("$HOST targeted more than one minion")
+        }
 
-        if (HOST_TYPE.toLowerCase() != 'osd') {
+        if(useUpmap) {
+            stage('enable upmap balancer') {
+                def features = ceph.cmdRun(pepperEnv, "ceph features --format json", false)
+                features = common.parseJSON(features)
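+                // 'ceph features' reports the client section as map entries on Luminous
+                // and as a plain list on Nautilus; both layouts are checked below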
+                for(group in features['client']) {
+                    if(group instanceof java.util.HashMap$Node) { // Luminous
+                        if(group.getValue()['release'] != 'luminous') {
+                            throw new Exception("Some of the installed clients do not support upmap. Update all clients to Luminous or newer before using upmap")
+                        }
+                    }
+                    else if(group['release'] != 'luminous') { // Nautilus
+                        throw new Exception("Some of the installed clients do not support upmap. Update all clients to Luminous or newer before using upmap")
+                    }
+                }
+                ceph.cmdRun(pepperEnv, 'ceph osd set-require-min-compat-client luminous')
+                ceph.cmdRun(pepperEnv, 'ceph balancer on')
+                ceph.cmdRun(pepperEnv, 'ceph balancer mode upmap')
+            }
+        }
 
-            // launch VMs
+        salt.fullRefresh(pepperEnv, HOST)
+
+        stage("set flags") {
+            if(useUpmap) {
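+                // keep data in place until the pg-upmap-items mappings are applied in the 'update mappings' stage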
+                flags.add('norebalance')
+            }
+            ceph.setFlags(pepperEnv, flags)
+        }
+
+        try {
             stage('Launch VMs') {
-                salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control', true)
+                if(salt.testTarget(pepperEnv, "$HOST and not I@ceph:osd")) {
+                    // launch VMs
+                    salt.enforceState([saltId: pepperEnv, target: "I@salt:control", state: 'salt.control'])
 
-                // wait till the HOST appears in salt-key on salt-master
-                salt.minionPresent(pepperEnv, 'I@salt:master', HOST)
-            }
-        }
-
-        // run basic states
-        stage('Install infra') {
-            orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
-        }
-
-        if (HOST_TYPE.toLowerCase() == 'osd') {
-
-            // Install Ceph osd
-            stage('Install Ceph OSD') {
-                orchestrate.installCephOsd(pepperEnv, HOST)
-            }
-        } else if (HOST_TYPE.toLowerCase() == 'mon') {
-            // Install Ceph mon
-            stage('Install Ceph MON') {
-                salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
-                // install Ceph Mons
-                salt.enforceState(pepperEnv, 'I@ceph:mon', 'ceph.mon', true)
-                if (salt.testTarget(pepperEnv, 'I@ceph:mgr')) {
-                    salt.enforceState(pepperEnv, 'I@ceph:mgr', 'ceph.mgr', true)
+                    // wait till the HOST appears in salt-key on salt-master
+                    salt.minionPresent(pepperEnv, 'I@salt:master', HOST)
+                }
+                else {
+                    common.infoMsg("No VM require for a osd node.")
                 }
             }
-        } else if (HOST_TYPE.toLowerCase() == 'rgw') {
-            // Install Ceph rgw
-            stage('Install Ceph RGW') {
-                salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy', 'ceph.radosgw'], true)
+
+            stage('Install infra') {
+                if(!osdOnly) {
+                    // run basic states
+                    orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
+                }
+                else {
+                    common.infoMsg('Stage skipped due to OSD_ONLY.')
+                }
+            }
+
+            stage('Install ceph components') {
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:mon")) {
+                    ceph.installMon(pepperEnv, HOST)
+                }
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:radosgw")) {
+                    ceph.installRgw(pepperEnv, HOST)
+                }
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:osd")) {
+                    ceph.installOsd(pepperEnv, HOST, !osdOnly) // skip extra setup when only OSDs are added
+                }
+                else if(osdOnly) {
+                    common.infoMsg('Stage skipped due to OSD_ONLY.')
+                }
+            }
+
+            stage("Update/Install monitoring and hosts files") {
+                if(!osdOnly) {
+                    ceph.updateMonitoring(pepperEnv, HOST)
+                    salt.enforceState([saltId: pepperEnv, target: "I@ceph:common", state: 'linux.network.host'])
+                }
+                else {
+                    common.infoMsg('Stage skipped due to OSD_ONLY.')
+                }
+            }
+
+            if(useUpmap) {
+                stage("update mappings") {
+                    def mapping = []
+                    def pgmap
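+                    // up to three passes: list remapped PGs and pin each one to its current
+                    // acting set via 'ceph osd pg-upmap-items' so unsetting norebalance does
+                    // not trigger an immediate mass data movement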
+                    for (int x = 1; x <= 3; x++) {
+                        pgmap = ceph.cmdRun(pepperEnv, 'ceph pg ls remapped --format=json', false)
+                        if (pgmap.trim()) {
+                            pgmap = "{\"pgs\":$pgmap}" // common.parseJSON() can't parse a list of maps
+                            pgmap = common.parseJSON(pgmap)['pgs']
+                            ceph.generateMapping(pgmap, mapping)
+                            for(map in mapping) {
+                                ceph.cmdRun(pepperEnv, map)
+                            }
+                            sleep(30)
+                        }
+                    }
+                }
+
+                stage('Unset norebalance') {
+                    ceph.unsetFlags(pepperEnv, 'norebalance')
+                    flags.removeElement('norebalance')
+                }
+            }
+            stage('Wait for healthy cluster status') {
+                ceph.waitForHealthy(pepperEnv, flags)
             }
         }
-
-        stage("Update/Install monitoring") {
-            def prometheusNodes = salt.getMinions(pepperEnv, 'I@prometheus:server')
-            if (!prometheusNodes.isEmpty()) {
-                //Collect Grains
-                salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
-                salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
-                salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
-                sleep(5)
-                salt.enforceState(pepperEnv, HOST, 'prometheus')
-                salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
-            } else {
-                common.infoMsg('No Prometheus nodes in cluster. Nothing to do')
+        finally {
+            stage('Unset cluster flags') {
+                ceph.unsetFlags(pepperEnv, flags)
             }
         }
     }
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 07fc662..0a98f5c 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -1,134 +1,22 @@
 /**
  *
- * Add Ceph OSD node to existing cluster using upmap mechanism
+ * Add Ceph OSD node to existing cluster using upmap (wrapper around the ceph-add-node job)
  *
  * Requred parameters:
  *  SALT_MASTER_URL             URL of Salt master
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *  HOST                        OSD Host (minion id) to be added
+ *  HOST                        Host (minion id) to be added
+ *  CLUSTER_FLAGS               Expected flags on the cluster during job run
  *
  */
 
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-def ceph = new com.mirantis.mk.Ceph()
-orchestrate = new com.mirantis.mk.Orchestrate()
-pepperEnv = "pepperEnv"
-def flags = CLUSTER_FLAGS ? CLUSTER_FLAGS.tokenize(',') : []
-
-def runCephCommand(cmd) {
-    return salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse = true, batch = null, output = false)
-}
-
-def getpgmap() {
-    return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
-}
-
-def generatemapping(master,pgmap,map) {
-    def pg_new
-    def pg_old
-    for (pg in pgmap.get('pg_stats',[])) {
-        pg_new = pg["up"].minus(pg["acting"])
-        pg_old = pg["acting"].minus(pg["up"])
-        for (i = 0; i < pg_new.size(); i++) {
-            def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
-            map.add(string)
-        }
-    }
-}
-
 timeout(time: 12, unit: 'HOURS') {
-    node("python") {
-        try {
-            // create connection to salt master
-            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-            stage ("verification of supported features") {
-                def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
-                if (checknode['return'][0].values().isEmpty()) {
-                    common.errorMsg("Host not found")
-                    throw new InterruptedException()
-                }
-                def cmn = salt.getFirstMinion(pepperEnv, "I@ceph:mon")
-                def features = salt.cmdRun(pepperEnv, cmn, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
-
-                features = new groovy.json.JsonSlurperClassic().parseText(features[0][cmn])
-                for ( group in features['client'] ) {
-                    if ( group['release'] != 'luminous' ) {
-                        throw new Exception("Some of installed clients does not support upmap. Update all clients to luminous or newer before using this pipeline")
-                    }
-                }
-            }
-
-            stage("enable luminous compat") {
-                runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
-            }
-
-            stage("enable upmap balancer") {
-                runCephCommand('ceph balancer on')['return'][0].values()[0]
-                runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
-            }
-
-            stage("set norebalance") {
-                runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
-            }
-
-            stage('Install infra') {
-                orchestrate.installFoundationInfraOnTarget(pepperEnv, HOST)
-            }
-
-            stage('Install Ceph OSD') {
-                salt.enforceState(pepperEnv, HOST, 'linux.storage')
-                orchestrate.installCephOsd(pepperEnv, HOST)
-            }
-
-            stage("Update/Install monitoring") {
-                def prometheusNodes = salt.getMinions(pepperEnv, 'I@prometheus:server')
-                if (!prometheusNodes.isEmpty()) {
-                    //Collect Grains
-                    salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
-                    salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
-                    salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
-                    sleep(5)
-                    salt.enforceState(pepperEnv, HOST, ['fluentd', 'telegraf', 'prometheus'])
-                    salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
-                } else {
-                    common.infoMsg('No Prometheus nodes in cluster. Nothing to do')
-                }
-            }
-
-            stage("Update host files") {
-                salt.enforceState(pepperEnv, '*', 'linux.network.host')
-            }
-
-
-            stage("update mappings") {
-                def pgmap
-                for (int x = 1; x <= 3; x++) {
-                    pgmap = getpgmap()
-                    if (pgmap == '') {
-                        return 1
-                    } else {
-                        def mapping = []
-                        pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
-                        generatemapping(pepperEnv, pgmap, mapping)
-                        mapping.each(this.&runCephCommand)
-                        sleep(30)
-                    }
-                }
-            }
-
-            stage("unset norebalance") {
-                runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
-            }
-
-            stage("wait for healthy cluster") {
-                ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
-            }
-        }
-        finally {
-            runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
-        }
-    }
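+    // this job is now a thin wrapper that delegates to ceph-add-node with OSD_ONLY and USE_UPMAP enabled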
+    build job: 'ceph-add-node', parameters: [
+        [$class: 'BooleanParameterValue', name: 'OSD_ONLY', value: true],
+        [$class: 'BooleanParameterValue', name: 'USE_UPMAP', value: true],
+        [$class: 'StringParameterValue', name: 'HOST', value: HOST],
+        [$class: 'StringParameterValue', name: 'CLUSTER_FLAGS', value: CLUSTER_FLAGS],
+        [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+        [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL]
+    ]
 }
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index 771a399..9fd38b6 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -6,10 +6,12 @@
  *  SALT_MASTER_URL             URL of Salt master
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
  *  HOST                        Host (minion id) to be removed
- *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
- *  ADMIN_HOST                  Host (minion id) with admin keyring
- *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stoping daemons
- *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
+ *  WAIT_FOR_HEALTHY            Wait for cluster rebalance after an OSD is removed
+ *  CLUSTER_FLAGS               Expected flags on the cluster during job run
+ *  FAST_WIPE                   Clean only the partition table instead of doing a full wipe
+ *  CLEAN_ORPHANS               Clean Ceph partitions which are no longer part of the cluster
+ *  OSD                         Comma-separated list of OSDs to remove while keeping the rest intact
+ *  GENERATE_CRUSHMAP           Generate a new crush map. Ignored when OSD is set
  *
  */
 
@@ -17,9 +19,17 @@
 def salt = new com.mirantis.mk.Salt()
 def ceph = new com.mirantis.mk.Ceph()
 def python = new com.mirantis.mk.Python()
+
 def pepperEnv = "pepperEnv"
 
-def cleanDisk = CLEANDISK
+def osds = OSD.tokenize(',').toSet()
+def flags = CLUSTER_FLAGS.tokenize(',').toSet()
+def cleanOrphans = CLEAN_ORPHANS.toBoolean()
+def fullWipe = !FAST_WIPE.toBoolean()
+def safeRemove = WAIT_FOR_HEALTHY.toBoolean()
+
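+// a non-empty OSD list limits removal to those OSDs and keeps the node itself;
+// regenerating the crush map only applies to whole-node removal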
+def osdOnly = OSD.trim() as Boolean
+def generateCrushmap = osdOnly ? false : GENERATE_CRUSHMAP.toBoolean()
 
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
@@ -27,282 +37,132 @@
         // create connection to salt master
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-        matches = ["osd", "mon", "rgw"]
-        def found = false
-        for (s in matches) {
-            if (HOST_TYPE.toLowerCase() == s) {
-                found = true
-            }
-        }
-
-        if (!found) {
-            common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
-            throw new InterruptedException()
-        }
-
-        def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
-        if (checknode['return'][0].values().isEmpty()) {
+        def target = salt.getMinions(pepperEnv, HOST)
+        if(target.isEmpty()) {
             common.errorMsg("Host not found")
             throw new InterruptedException()
         }
-
-        stage('Refresh_pillar') {
-            salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
+        else if(target.size() > 1) {
+            common.errorMsg("$HOST targeted more than one minion")
+            throw new InterruptedException()
         }
 
-        //  split minion id on '.' and remove '*'
-        def target = HOST.split("\\.")[0].replace("*", "")
+        salt.fullRefresh(pepperEnv, HOST)
 
-        salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.sync_grains', [], null, true, 5)
-        def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
-        domain = _pillar['return'][0].values()[0].values()[0]
-
-        if (HOST_TYPE.toLowerCase() == 'rgw') {
-            // Remove Ceph rgw
-            stage('Remove Ceph RGW') {
-                salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
-            }
-
-            stage('Purge Ceph RGW pkgs') {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
-            }
+        stage('Set flags') {
+            ceph.setFlags(pepperEnv, flags)
         }
 
-        if (HOST_TYPE.toLowerCase() != 'osd') {
-
-            // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
-            stage('Destroy/Undefine VM') {
-                _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
-                def kvm01 = _pillar['return'][0].values()[0].values()[0]
-
-                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
-                def targetProvider = _pillar['return'][0].values()[0]
-
-                salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
-                salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
-            }
-        }
-        else if (HOST_TYPE.toLowerCase() == 'osd') {
-            def osd_ids = []
-            def device_grain_name =  "ceph_disk"
-            // get list of osd disks of the host
-            salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
-
-            for (i in ceph_disks) {
-                def osd_id = i.getKey().toString()
-                osd_ids.add('osd.' + osd_id)
-                print("Will delete " + osd_id)
-            }
-
-            // `ceph osd out <id> <id>`
-            stage('Set OSDs out') {
-                salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
-            }
-
-            // wait for healthy cluster
-            if (WAIT_FOR_HEALTHY.toBoolean()) {
-                sleep(5)
-                ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
-            }
-
-            // stop osd daemons
-            stage('Stop OSD daemons') {
-                for (i in osd_ids) {
-                    salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
-                }
-            }
-
-            // `ceph osd crush remove osd.2`
-            stage('Remove OSDs from CRUSH') {
-                for (i in osd_ids) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
-                }
-            }
-
-            // remove keyring `ceph auth del osd.3`
-            stage('Remove OSD keyrings from auth') {
-                for (i in osd_ids) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
-                }
-            }
-
-            // remove osd `ceph osd rm osd.3`
+        try {
             stage('Remove OSDs') {
-                for (i in osd_ids) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:osd")) {
+                    // get list of osd disks of the host
+                    def cephGrain = ceph.getGrain(pepperEnv, HOST, 'ceph')
+                    def cephDisks = cephGrain.get('ceph_disk',[:]).keySet()
+                    if (cephGrain.isEmpty()) {
+                        throw new Exception("Ceph salt grains cannot be found on $HOST")
+                    }
+
+                    // take all of the host's OSDs when OSD is '*' or the whole node is being removed
+                    if(OSD == '*' || !osdOnly) {
+                        osds = cephDisks
+                    }
+
+                    // discard all osds which aren't deployed on target HOST
+                    osds = osds.intersect(cephDisks)
+
+                    if(!osds.isEmpty()) {
+                        common.infoMsg("The following osds will be removed: ${osds.join(', ')}")
+                    }
+                    if(osds != cephDisks) {
+                        common.infoMsg("The following osds will be skiped: ${cephDisks.removeAll(osds).join(', ')}")
+                    }
+
+                    ceph.removeOsd(pepperEnv, HOST, osds, flags, safeRemove, fullWipe)
+
+                    if(cleanOrphans) {
+                        ceph.removeOrphans(pepperEnv, HOST, fullWipe)
+                    }
+                }
+                else {
+                    common.infoMsg('Stage skipped.')
                 }
             }
 
-            for (osd_id in osd_ids) {
-                id = osd_id.replaceAll('osd.', '')
+            stage('Remove keyring') {
+                // only non-OSD nodes, as keyrings for OSDs were already removed in the previous step
+                if(salt.testTarget(pepperEnv, "$HOST and not I@ceph:osd")) {
+                    ceph.deleteKeyrings(pepperEnv, HOST)
+                }
+                else {
+                    common.infoMsg('Stage skipped.')
+                }
+            }
 
-                // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                stage('Remove journal / block_db / block_wal partition') {
-                    def partition_uuid = ""
-                    def journal_partition_uuid = ""
-                    def block_db_partition_uuid = ""
-                    def block_wal_partition_uuid = ""
-                    def ceph_version = salt.getPillar(pepperEnv, HOST, 'ceph:common:ceph_version').get('return')[0].values()[0]
+            stage('Update monmap') {
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:mon")) {
+                    def hostname = ceph.getGrain(pepperEnv, HOST, 'host')
+                    ceph.cmdRun(pepperEnv, 'ceph mon getmap -o monmap.backup')
+                    ceph.cmdRun(pepperEnv, "ceph mon remove $hostname")
+                    salt.cmdRun(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm $hostname")
+                }
+                else {
+                    common.infoMsg('Stage skipped.')
+                }
+            }
 
-                    if (ceph_version == "luminous") {
-                        try {
-                            journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
-                        }
-                        catch(Exception e) {
-                            common.infoMsg(e)
-                        }
-                        try {
-                            block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
-                        }
-                        catch(Exception e) {
-                            common.infoMsg(e)
-                        }
-                        try {
-                            block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
-                        }
-                        catch(Exception e) {
-                            common.infoMsg(e)
-                        }
+            stage('Update Ceph configs/crushmap') {
+                // TODO: this does not remove the deleted mon from the Ceph config
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:mon")) {
+                    salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
+                }
+                else if (salt.testTarget(pepperEnv, "$HOST and I@ceph:osd") && salt.testTarget(pepperEnv, "I@ceph:setup:crush and not $HOST") && generateCrushmap) {
+                    salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
+                }
+                else {
+                    common.infoMsg('Stage skipped.')
+                }
+            }
+
+            stage('Purge Ceph components') {
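+                // collect base packages plus role-specific ones per matched pillar role;
+                // the purge itself runs only for whole-node removal (skipped when only OSDs are removed)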
+                Set pkgs = ['ceph-base','ceph-common']
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:osd")) {
+                    pkgs.addAll(['ceph-osd','ceph-fuse','ceph-mds','python-cephfs','librados2','python-rados','python-rbd','python-rgw'])
+                }
+                // TODO: why purge packages on a VM that will be removed entirely in the next stage?
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:radosgw")) {
+                    ceph.removeRgw(pepperEnv, HOST)
+                    pkgs.addAll(['radosgw','libcephfs2','python-cephfs','python-rados','python-rbd','python-rgw'])
+                }
+                if(salt.testTarget(pepperEnv, "$HOST and I@ceph:mon")) {
+                    pkgs.addAll(['ceph-mon','ceph-mgr','libcephfs2','python-cephfs','python-rbd','python-rgw'])
+                }
+
+                if(!osdOnly) {
+                    salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', "pkgs='$pkgs'")
+                }
+                else {
+                    common.infoMsg('Stage skipped.')
+                }
+            }
+
+            stage('Remove salt minion and destroy VM') {
+                if(!osdOnly) {
+                    if(salt.testTarget(pepperEnv, "$HOST and I@ceph:osd")) {
+                        ceph.removeSalt(pepperEnv, HOST)
                     }
                     else {
-                        def volumes = salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm list --format=json", checkResponse=true, batch=null, output=false)
-                        volumes = new groovy.json.JsonSlurperClassic().parseText(volumes['return'][0].values()[0])
-
-                        block_db_partition_uuid = volumes[id][0]['tags'].get('ceph.db_uuid')
-                        block_wal_partition_uuid = volumes[id][0]['tags'].get('ceph.wal_uuid')
-                    }
-
-
-                    if (journal_partition_uuid?.trim()) {
-                        ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
-                    }
-                    if (block_db_partition_uuid?.trim()) {
-                        ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
-                    }
-                    if (block_wal_partition_uuid?.trim()) {
-                        ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
-                    }
-
-                    try {
-                        salt.cmdRun(pepperEnv, HOST, "partprobe")
-                    } catch (Exception e) {
-                        common.warningMsg(e)
+                        ceph.removeVm(pepperEnv, HOST)
                     }
                 }
-
-                if (cleanDisk) {
-                // remove data / block / lockbox partition `parted /dev/sdj rm 3`
-                    stage('Remove data / block / lockbox partition') {
-                        def data_partition_uuid = ""
-                        def block_partition_uuid = ""
-                        def osd_fsid = ""
-                        def lvm = ""
-                        def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
-                        try {
-                            osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
-                            if (lvm_enabled) {
-                                lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
-                                lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
-                                lvm["local"].each { lv, params ->
-                                    if (params["Logical Volume Name"].contains(osd_fsid)) {
-                                        data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
-                                    }
-                                }
-                            } else {
-                                data_partition_uuid = osd_fsid
-                            }
-                        } catch (Exception e) {
-                            common.infoMsg(e)
-                        }
-                        try {
-                            block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
-                        }
-                        catch (Exception e) {
-                            common.infoMsg(e)
-                        }
-
-                        // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
-                        if (block_partition_uuid?.trim()) {
-                            ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
-                            try {
-                                salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
-                            }
-                            catch (Exception e) {
-                                common.infoMsg(e)
-                            }
-                        }
-                        if (data_partition_uuid?.trim()) {
-                            ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
-                        }
-                    }
-                }
-            }
-
-            // purge Ceph pkgs
-            stage('Purge Ceph OSD pkgs') {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
-            }
-
-            stage('Remove OSD host from crushmap') {
-                def hostname = salt.cmdRun(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
-                try {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
-                } catch (Exception e) {
-                    common.warningMsg(e)
-                }
-            }
-
-            // stop salt-minion service and move its configuration
-            stage('Stop salt-minion') {
-                salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
-                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
-            }
-
-            stage('Remove salt-key') {
-                try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
-                } catch (Exception e) {
-                    common.warningMsg(e)
-                }
-                try {
-                    salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
-                } catch (Exception e) {
-                    common.warningMsg(e)
+                else {
+                    common.infoMsg('Stage skipped.')
                 }
             }
         }
-
-        if (HOST_TYPE.toLowerCase() == 'mon') {
-            // Update Monmap
-            stage('Update monmap') {
-                salt.cmdRun(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
-                try {
-                    salt.cmdRun(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
-                } catch (Exception e) {
-                    common.warningMsg(e)
-                }
-                salt.cmdRun(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
-            }
-
-            def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
-
-            // Update configs
-            stage('Update Ceph configs') {
-                for (tgt in target_hosts) {
-                    salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
-                }
-            }
-
-            stage('Purge Ceph MON pkgs') {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
-            }
-        }
-
-        def crushmap_target = salt.getMinions(pepperEnv, "I@ceph:setup:crush")
-        if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true && crushmap_target) {
-            stage('Generate CRUSHMAP') {
-                salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
+        finally {
+            stage('Unset cluster flags') {
+                ceph.unsetFlags(pepperEnv, flags)
             }
         }
     }
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 405d478..249b51c 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -1,238 +1,28 @@
 /**
  *
- * Remove OSD from existing cluster
+ * Remove Ceph OSDs from a node
  *
  * Requred parameters:
  *  SALT_MASTER_URL             URL of Salt master
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
- *
- *  HOST                        Host (minion id) to be removed
- *  OSD                         Comma separated list of osd ids to be removed
- *  ADMIN_HOST                  Host (minion id) with admin keyring
- *  CLUSTER_FLAGS               Comma separated list of tags to apply to cluster
- *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stoping daemons
- *  CLEANDISK                   Wipe data disk of removed osd
- *  CLEAN_ORPHANS               Wipe partition left over after unknown osd
+ *  HOST                        Host (minion id) from which OSDs will be removed
+ *  WAIT_FOR_HEALTHY            Wait for cluster rebalance after an OSD is removed
+ *  CLUSTER_FLAGS               Expected flags on the cluster during job run
+ *  FAST_WIPE                   Clean only the partition table instead of doing a full wipe
+ *  CLEAN_ORPHANS               Clean Ceph partitions which are no longer part of the cluster
+ *  OSD                         Comma-separated list of OSDs to remove while keeping the rest intact
  *
  */
 
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def ceph = new com.mirantis.mk.Ceph()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def flags = CLUSTER_FLAGS.tokenize(',')
-def osds = OSD.tokenize(',')
-def cleanDisk = CLEANDISK.toBoolean()
-def cleanOrphans = CLEAN_ORPHANS.toBoolean()
-
 timeout(time: 12, unit: 'HOURS') {
-    node("python") {
-
-        // create connection to salt master
-        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
-
-        def osd_ids = []
-
-        def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
-        if (checknode['return'][0].values().isEmpty()) {
-            common.errorMsg("Host not found")
-            throw new InterruptedException()
-        }
-
-        // get list of osd disks of the host
-        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-        def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
-        def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
-
-        if (cephGrain['return'].isEmpty()) {
-            throw new Exception("Ceph salt grain cannot be found!")
-        }
-
-        if (flags.size() > 0) {
-            stage('Set cluster flags') {
-                for (flag in flags) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
-                }
-            }
-        }
-
-        try {
-            for (i in ceph_disks) {
-                def osd_id = i.getKey().toString()
-                if (osd_id in osds || OSD == '*') {
-                    osd_ids.add('osd.' + osd_id)
-                    print("Will delete " + osd_id)
-                } else {
-                    print("Skipping " + osd_id)
-                }
-            }
-
-            // `ceph osd out <id> <id>`
-            stage('Set OSDs out') {
-                if ( !osd_ids.isEmpty() ) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
-                }
-            }
-
-            // wait for healthy cluster
-            if (WAIT_FOR_HEALTHY.toBoolean()) {
-                sleep(5)
-                ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
-            }
-
-            // stop osd daemons
-            stage('Stop OSD daemons') {
-                for (i in osd_ids) {
-                    salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
-                }
-            }
-
-            // `ceph osd crush remove osd.2`
-            stage('Remove OSDs from CRUSH') {
-                for (i in osd_ids) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
-                }
-            }
-
-            // remove keyring `ceph auth del osd.3`
-            stage('Remove OSD keyrings from auth') {
-                for (i in osd_ids) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
-                }
-            }
-
-            // remove osd `ceph osd rm osd.3`
-            stage('Remove OSDs') {
-                for (i in osd_ids) {
-                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
-                }
-            }
-
-            for (osd_id in osd_ids) {
-                id = osd_id.replaceAll('osd.', '')
-
-                // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                stage('Remove journal / block_db / block_wal partition') {
-                    def partition_uuid = ""
-                    def journal_partition_uuid = ""
-                    def block_db_partition_uuid = ""
-                    def block_wal_partition_uuid = ""
-                    try {
-                        journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
-                    }
-                     catch (Exception e) {
-                        common.infoMsg(e)
-                    }
-                    try {
-                        block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
-                    }
-                    catch (Exception e) {
-                        common.infoMsg(e)
-                    }
-
-                    try {
-                        block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
-                    }
-                    catch (Exception e) {
-                        common.infoMsg(e)
-                    }
-
-                    // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
-                    if (journal_partition_uuid?.trim()) {
-                        ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
-                    }
-                    if (block_db_partition_uuid?.trim()) {
-                        ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
-                    }
-                    if (block_wal_partition_uuid?.trim()) {
-                        ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
-                    }
-
-                    try {
-                        salt.cmdRun(pepperEnv, HOST, "partprobe")
-                    }
-                    catch (Exception e) {
-                        common.warningMsg(e)
-                    }
-                }
-                if (cleanDisk) {
-                    // remove data / block / lockbox partition `parted /dev/sdj rm 3`
-                    stage('Remove data / block / lockbox partition') {
-                        def data_partition_uuid = ""
-                        def block_partition_uuid = ""
-                        def osd_fsid = ""
-                        def lvm = ""
-                        def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
-                        try {
-                            osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
-                            if (lvm_enabled) {
-                                lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
-                                lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
-                                lvm["local"].each { lv, params ->
-                                    if (params["Logical Volume Name"].contains(osd_fsid)) {
-                                        data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
-                                    }
-                                }
-                            }
-                        }
-                        catch (Exception e) {
-                            common.infoMsg(e)
-                        }
-                        try {
-                            block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
-                        }
-                        catch (Exception e) {
-                            common.infoMsg(e)
-                        }
-
-                        // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
-                        if (block_partition_uuid?.trim()) {
-                            ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
-                            try{
-                                salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
-                            }
-                            catch (Exception e) {
-                                common.infoMsg(e)
-                            }
-                        }
-                        if (data_partition_uuid?.trim()) {
-                            ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
-                        }
-                        else {
-                            ceph.removePartition(pepperEnv, HOST, osd_fsid, 'data', id)
-                        }
-                    }
-                }
-            }
-            if (cleanOrphans) {
-                stage('Remove orphan partitions') {
-                    def orphans = []
-                    def disks = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph-disk list --format json")['return'][0].values()[0]
-                    for (disk in disks) {
-                        for (partition in disk.get('partitions')) {
-                            if (partition.get('type') == 'block.db' && !partition.containsKey('block.db_for')) {
-                                orphans.add(partition['uuid'])
-                            }
-                        }
-                    }
-                    for (orphan in orphans) {
-                        ceph.removePartition(pepperEnv, HOST, orphan)
-                    }
-                }
-            }
-        }
-        finally {
-            // remove cluster flags
-            if (flags.size() > 0) {
-                stage('Unset cluster flags') {
-                    for (flag in flags) {
-                        common.infoMsg('Removing flag ' + flag)
-                        salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
-                    }
-                }
-            }
-        }
-    }
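+    // this job is now a thin wrapper that delegates to ceph-remove-node, limited to the listed OSDs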
+    build job: 'ceph-remove-node', parameters: [
+        [$class: 'BooleanParameterValue', name: 'CLEAN_ORPHANS', value: CLEAN_ORPHANS],
+        [$class: 'BooleanParameterValue', name: 'FAST_WIPE', value: FAST_WIPE],
+        [$class: 'BooleanParameterValue', name: 'WAIT_FOR_HEALTHY', value: WAIT_FOR_HEALTHY],
+        [$class: 'StringParameterValue', name: 'HOST', value: HOST],
+        [$class: 'StringParameterValue', name: 'OSD', value: OSD],
+        [$class: 'StringParameterValue', name: 'CLUSTER_FLAGS', value: CLUSTER_FLAGS],
+        [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+        [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL]
+    ]
 }
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 25b4748..39695f2 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -9,6 +9,7 @@
  *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
  *  CLUSTER_FLAGS                   Comma separated list of tags to apply to cluster
  *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stoping daemons
+ *  ASK_CONFIRMATION                Ask for manual confirmation
  *  ORIGIN_RELEASE                  Ceph release version before upgrade
  *  TARGET_RELEASE                  Ceph release version after upgrade
  *  STAGE_UPGRADE_MON               Set to True if Ceph mon nodes upgrade is desired
@@ -26,6 +27,7 @@
 salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
 ceph = new com.mirantis.mk.Ceph()
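+// default to asking for confirmation when ASK_CONFIRMATION is not defined on the job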
+askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
 
 pepperEnv = "pepperEnv"
 flags = CLUSTER_FLAGS.tokenize(',')
@@ -129,8 +131,10 @@
                 salt.cmdRun(master, "${minion}", "systemctl status ceph-${target}.target")
             }
 
-            stage('Ask for manual confirmation') {
-                input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
+            if (askConfirmation) {
+                stage('Ask for manual confirmation') {
+                    input message: "From the verification command above, please check that Ceph ${target} joined the cluster correctly. If so, do you want to continue upgrading the next node?"
+                }
             }
         }
     }
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index 8ea606d..42966e9 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -22,6 +22,8 @@
   openstack_credentials_id = OPENSTACK_API_CREDENTIALS
 }
 
+def nodeLabel = 'old16.04'
+
 def checkouted = false
 def openstackTest = false
 def travisLess = false      /** TODO: Remove once formulas are witched to new config */
@@ -30,7 +32,7 @@
 
 throttle(['test-formula']) {
   timeout(time: 1, unit: 'HOURS') {
-    node("python&&docker") {
+    node(nodeLabel) {
       try {
         stage("checkout") {
           if (defaultGitRef && defaultGitUrl) {
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index e3449f1..a071058 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -13,7 +13,7 @@
 def gerritRef = env.GERRIT_REFSPEC ?: null
 def defaultGitRef = env.DEFAULT_GIT_REF ?: null
 def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
-def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+def slaveNode = env.SLAVE_NODE ?: 'old16.04'
 def saltVersion = env.SALT_VERSION ?: ""
 def dockerLib = new com.mirantis.mk.Docker()
 
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index 27e0909..7753b27 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -28,9 +28,11 @@
 def distribRevision = env.DISTRIB_REVISION ?: 'nightly'
 def checkouted = false
 
+def nodeLabel = 'old16.04'
+
 throttle(['test-model']) {
   timeout(time: 1, unit: 'HOURS') {
-    node("python&&docker") {
+    node(nodeLabel) {
       try{
         stage("checkout") {
           if(defaultGitRef != "" && defaultGitUrl != "") {
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 3b88aee..0b4a4d6 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -86,8 +86,10 @@
   return true;
 }
 
+def nodeLabel = 'old16.04'
+
 timeout(time: 12, unit: 'HOURS') {
-  node("python") {
+  node(nodeLabel) {
     try{
       stage("checkout") {
         if (gerritRef) {
diff --git a/update-ceph.groovy b/update-ceph.groovy
index dbb45d5..6cef0b3 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -89,7 +89,7 @@
                     for (i in osd_ids) {
                         salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
                         // wait for healthy cluster
-                        ceph.waitForHealthy(pepperEnv, tgt, flags, 0, 100)
+                        ceph.waitForHealthy(pepperEnv, tgt, flags, 100)
                     }
 
                     if (runHighState) {