Merge "Use common waitForHealthy function from pipeline-library"
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 33a5a67..9ec96c2 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -75,14 +75,18 @@
         }
 
         stage("Update/Install monitoring") {
-            //Collect Grains
-            salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
-            salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
-            salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
-            sleep(5)
-
-            salt.enforceState(pepperEnv, HOST, 'prometheus')
-            salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+            def prometheusNodes = salt.getMinions(pepperEnv, 'I@prometheus:server')
+            if (!prometheusNodes.isEmpty()) {
+                //Collect Grains
+                salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
+                salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
+                salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
+                sleep(5)
+                salt.enforceState(pepperEnv, HOST, 'prometheus')
+                salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+            } else {
+                common.infoMsg('No Prometheus nodes in cluster. Nothing to do')
+            }
         }
     }
 }
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 4bbb78d..f7cb469 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -9,56 +9,35 @@
  *
  */
 
-common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
+def ceph = new com.mirantis.mk.Ceph()
 orchestrate = new com.mirantis.mk.Orchestrate()
-
-def waitForHealthy(master, count=0, attempts=100) {
-    // wait for healthy cluster
-    while (count<attempts) {
-        def health = runCephCommand('ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        }
-        count++
-        sleep(10)
-    }
-}
+pepperEnv = "pepperEnv"
 
 def runCephCommand(cmd) {
-  return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+    return salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse = true, batch = null, output = false)
 }
 
-def getpgmap(master) {
-  return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+def getpgmap() {
+    return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
 }
 
 def generatemapping(master,pgmap,map) {
-  def pg_new
-  def pg_old
-
-  for ( pg in pgmap )
-  {
-
-    pg_new = pg["up"].minus(pg["acting"])
-    pg_old = pg["acting"].minus(pg["up"])
-
-    for ( i = 0; i < pg_new.size(); i++ )
-    {
-      def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
-      map.add(string)
+    def pg_new
+    def pg_old
+    for (pg in pgmap) {
+        pg_new = pg["up"].minus(pg["acting"])
+        pg_old = pg["acting"].minus(pg["up"])
+        for (i = 0; i < pg_new.size(); i++) {
+            def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+            map.add(string)
+        }
     }
-
-  }
 }
 
-def pepperEnv = "pepperEnv"
-
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
-
         // create connection to salt master
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
@@ -77,21 +56,18 @@
           }
         }
 
-        stage ("enable luminous compat")
-        {
-          runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+        stage("enable luminous compat") {
+            runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
         }
 
-        stage ("enable upmap balancer")
-        {
-          runCephCommand('ceph balancer on')['return'][0].values()[0]
-          runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+        stage("enable upmap balancer") {
+            runCephCommand('ceph balancer on')['return'][0].values()[0]
+            runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
         }
 
 
-        stage ("set norebalance")
-        {
-          runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+        stage("set norebalance") {
+            runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
         }
 
         stage('Install Ceph OSD') {
@@ -100,35 +76,27 @@
 
         def mapping = []
 
-        stage ("update mappings")
-        {
-          def pgmap1 = getpgmap(pepperEnv)
-          if ( pgmap1 == '' )
-          {
-            return 1
-          }
-          else
-          {
-            def pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap1)
-            for(int x=1; x<=3; x++){
-              pgmap1 = getpgmap(pepperEnv)
-              generatemapping(pepperEnv,pgmap,mapping)
-              mapping.each(this.&runCephCommand)
-              sleep(30)
+        stage("update mappings") {
+            def pgmap
+            for (int x = 1; x <= 3; x++) {
+                pgmap = getpgmap()
+                if (pgmap == '') {
+                    return 1
+                } else {
+                    pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
+                    generatemapping(pepperEnv, pgmap, mapping)
+                    mapping.each(this.&runCephCommand)
+                    sleep(30)
+                }
             }
-          }
-
         }
 
-        stage ("unset norebalance")
-        {
-          runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+        stage("unset norebalance") {
+            runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
         }
 
-        stage ("wait for healthy cluster")
-        {
-          waitForHealthy(pepperEnv)
+        stage("wait for healthy cluster") {
+            ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin")
         }
-
     }
 }
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 7a5821d..676c236 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -20,6 +20,7 @@
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
+ceph = new com.mirantis.mk.Ceph()
 
 MIGRATION_METHOD = "per-osd"
 // TBD: per-host
@@ -28,27 +29,7 @@
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
-def removePartition(master, target, partition_uuid) {
-    def partition = ""
-    try {
-        // partition = /dev/sdi2
-        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
-    } catch (Exception e) {
-        common.warningMsg(e)
-    }
-
-    if (partition?.trim()) {
-        // dev = /dev/sdi
-        def dev = partition.replaceAll('\\d+$', "")
-        // part_id = 2
-        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
-    }
-    return
-}
-
 def removeJournalOrBlockPartitions(master, target, id) {
-
     // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
     stage('Remove journal / block_db / block_wal partition') {
         def partition_uuid = ""
@@ -56,20 +37,20 @@
         def block_db_partition_uuid = ""
         def block_wal_partition_uuid = ""
         try {
-            journal_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+            journal_partition_uuid = salt.cmdRun(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
             journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
         } catch (Exception e) {
             common.infoMsg(e)
         }
         try {
-            block_db_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+            block_db_partition_uuid = salt.cmdRun(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
             block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
         } catch (Exception e) {
             common.infoMsg(e)
         }
 
         try {
-            block_wal_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+            block_wal_partition_uuid = salt.cmdRun(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
             block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
         } catch (Exception e) {
             common.infoMsg(e)
@@ -84,31 +65,15 @@
 
         // if disk has journal, block_db or block_wal on different disk, then remove the partition
         if (partition_uuid?.trim()) {
-            removePartition(master, target, partition_uuid)
+            ceph.removePartition(master, target, partition_uuid)
         }
         if (block_wal_partition_uuid?.trim()) {
-            removePartition(master, target, block_wal_partition_uuid)
+            ceph.removePartition(master, target, block_wal_partition_uuid)
         }
     }
     return
 }
 
-def runCephCommand(master, target, cmd) {
-    return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
-    // wait for healthy cluster
-    while (count<attempts) {
-        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        }
-        count++
-        sleep(10)
-    }
-}
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
 
@@ -120,7 +85,7 @@
             if (flags.size() > 0) {
                 stage('Set cluster flags') {
                     for (flag in flags) {
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                        salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                     }
                 }
             }
@@ -147,23 +112,23 @@
                 for (osd_id in osd_ids) {
 
                     def id = osd_id.replaceAll('osd.', '')
-                    def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
+                    def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
 
                     if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
 
                         // wait for healthy cluster before manipulating with osds
-                        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-                            waitForHealthy(pepperEnv)
+                        if (WAIT_FOR_HEALTHY.toBoolean()) {
+                            ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
                         }
 
                         // `ceph osd out <id> <id>`
                         stage('Set OSDs out') {
-                                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
+                            salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
                         }
 
-                        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+                        if (WAIT_FOR_HEALTHY.toBoolean()) {
                             sleep(5)
-                            waitForHealthy(pepperEnv)
+                            ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
                         }
 
                         // stop osd daemons
@@ -173,28 +138,28 @@
 
                         // remove keyring `ceph auth del osd.3`
                         stage('Remove OSD keyrings from auth') {
-                            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+                            salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
                         }
 
                         // remove osd `ceph osd rm osd.3`
                         stage('Remove OSDs') {
-                            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+                            salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
                         }
 
                         def dmcrypt = ""
                         try {
-                            dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                            dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
                         } catch (Exception e) {
                             common.warningMsg(e)
                         }
 
                         if (dmcrypt?.trim()) {
-                            def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                            def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
                             dev = mount.split()[0].replaceAll("[0-9]","")
 
                             // remove partition tables
                             stage('dd part tables') {
-                                runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                                salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
                             }
 
                             // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
@@ -210,16 +175,16 @@
                             // zap disks `ceph-disk zap /dev/sdi`
                             stage('Zap devices') {
                                 try {
-                                    runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                                    salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
                                 } catch (Exception e) {
                                     common.warningMsg(e)
                                 }
-                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                                salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
                             }
 
                         } else {
 
-                            def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+                            def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
                             dev = mount.split()[0].replaceAll("[0-9]","")
 
                             // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
@@ -227,12 +192,12 @@
 
                             // umount `umount /dev/sdi1`
                             stage('Umount devices') {
-                                runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+                                salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
                             }
 
                             // zap disks `ceph-disk zap /dev/sdi`
                             stage('Zap device') {
-                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                                salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
                             }
                         }
 
@@ -245,8 +210,8 @@
                         if (PER_OSD_CONTROL.toBoolean() == true) {
                             stage("Verify backend version for osd.${id}") {
                                 sleep(5)
-                                runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
-                                runCephCommand(pepperEnv, tgt, "ceph -s")
+                                salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+                                salt.cmdRun(pepperEnv, tgt, "ceph -s")
                             }
 
                             stage('Ask for manual confirmation') {
@@ -258,8 +223,8 @@
                 if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
                     stage("Verify backend versions") {
                         sleep(5)
-                        runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
-                        runCephCommand(pepperEnv, tgt, "ceph -s")
+                        salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+                        salt.cmdRun(pepperEnv, tgt, "ceph -s")
                     }
 
                     stage('Ask for manual confirmation') {
@@ -273,7 +238,7 @@
                 stage('Unset cluster flags') {
                     for (flag in flags) {
                         common.infoMsg('Removing flag ' + flag)
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                        salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                     }
                 }
             }
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index 766dda1..e1d6ce8 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -13,48 +13,12 @@
  *
  */
 
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-orchestrate = new com.mirantis.mk.Orchestrate()
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
 def python = new com.mirantis.mk.Python()
-
 def pepperEnv = "pepperEnv"
 
-def removePartition(master, target, partition_uuid) {
-    def partition = ""
-    try {
-        // partition = /dev/sdi2
-        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
-    } catch (Exception e) {
-        common.warningMsg(e)
-    }
-
-    if (partition?.trim()) {
-        // dev = /dev/sdi
-        def dev = partition.replaceAll('\\d+$', "")
-        // part_id = 2
-        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
-    }
-    return
-}
-
-def runCephCommand(master, target, cmd) {
-    return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
-    // wait for healthy cluster
-    while (count<attempts) {
-        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        }
-        count++
-        sleep(10)
-    }
-}
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
 
@@ -124,40 +88,40 @@
 
             // `ceph osd out <id> <id>`
             stage('Set OSDs out') {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+                salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
             }
 
             // wait for healthy cluster
-            if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+            if (WAIT_FOR_HEALTHY.toBoolean()) {
                 sleep(5)
-                waitForHealthy(pepperEnv)
+                ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
             }
 
             // stop osd daemons
             stage('Stop OSD daemons') {
                 for (i in osd_ids) {
-                    salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+                    salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
                 }
             }
 
             // `ceph osd crush remove osd.2`
             stage('Remove OSDs from CRUSH') {
                 for (i in osd_ids) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
                 }
             }
 
             // remove keyring `ceph auth del osd.3`
             stage('Remove OSD keyrings from auth') {
                 for (i in osd_ids) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
                 }
             }
 
             // remove osd `ceph osd rm osd.3`
             stage('Remove OSDs') {
                 for (i in osd_ids) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
                 }
             }
 
@@ -166,18 +130,18 @@
                 id = osd_id.replaceAll('osd.', '')
                 def dmcrypt = ""
                 try {
-                    dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                    dmcrypt = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
                 } catch (Exception e) {
                     common.warningMsg(e)
                 }
 
                 if (dmcrypt?.trim()) {
-                    mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
-                    dev = mount.split()[0].replaceAll("[0-9]","")
+                    mount = salt.cmdRun(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                    dev = mount.split()[0].replaceAll("[0-9]", "")
 
                     // remove partition tables
                     stage("dd part table on ${dev}") {
-                        runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                        salt.cmdRun(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
                     }
 
                 }
@@ -188,21 +152,21 @@
                     def block_db_partition_uuid = ""
                     def block_wal_partition_uuid = ""
                     try {
-                        journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
-                        journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                        journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+                        journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/") + 1)
                     } catch (Exception e) {
                         common.infoMsg(e)
                     }
                     try {
-                        block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
-                        block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                        block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+                        block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/") + 1)
                     } catch (Exception e) {
                         common.infoMsg(e)
                     }
 
                     try {
-                        block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
-                        block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                        block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+                        block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/") + 1)
                     } catch (Exception e) {
                         common.infoMsg(e)
                     }
@@ -216,10 +180,10 @@
 
                     // if disk has journal, block_db or block_wal on different disk, then remove the partition
                     if (partition_uuid?.trim()) {
-                        removePartition(pepperEnv, HOST, partition_uuid)
+                        ceph.removePartition(pepperEnv, HOST, partition_uuid)
                     }
                     if (block_wal_partition_uuid?.trim()) {
-                        removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+                        ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
                     }
                 }
             }
@@ -230,9 +194,9 @@
             }
 
             stage('Remove OSD host from crushmap') {
-                def hostname = runCephCommand(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
+                def hostname = salt.cmdRun(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
                 try {
-                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
                 } catch (Exception e) {
                     common.warningMsg(e)
                 }
@@ -262,7 +226,7 @@
             def keyring = ""
             def keyring_lines = ""
             try {
-                keyring_lines = runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
+                keyring_lines = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
             } catch (Exception e) {
                 common.warningMsg(e)
             }
@@ -273,20 +237,20 @@
                 }
             }
             if (keyring?.trim()) {
-                runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
+                salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
             }
         }
 
         if (HOST_TYPE.toLowerCase() == 'mon') {
             // Update Monmap
             stage('Update monmap') {
-                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
+                salt.cmdRun(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
                 try {
-                    runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
+                    salt.cmdRun(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
                 } catch (Exception e) {
                     common.warningMsg(e)
                 }
-                runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
+                salt.cmdRun(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
             }
 
             def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
@@ -305,7 +269,7 @@
         }
 
         def crushmap_target = salt.getMinions(pepperEnv, "I@ceph:setup:crush")
-        if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true && crushmap_target ) {
+        if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true && crushmap_target) {
             stage('Generate CRUSHMAP') {
                 salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
             }
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 66e9422..0a045c3 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -14,77 +14,15 @@
  *
  */
 
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
 def python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
-
-def removePartition(master, target, partition_uuid, type='', id=-1) {
-    def partition = ""
-    if (type == 'lockbox') {
-        try {
-            // umount - partition = /dev/sdi2
-            partition = runCephCommand(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
-            runCephCommand(master, target, "umount ${partition}")
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-    } else if (type == 'data') {
-        try {
-            // umount - partition = /dev/sdi2
-            partition = runCephCommand(master, target, "df | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
-            runCephCommand(master, target, "umount ${partition}")
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-        try {
-            // partition = /dev/sdi2
-            partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-    } else {
-        try {
-            // partition = /dev/sdi2
-            partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
-        } catch (Exception e) {
-            common.warningMsg(e)
-        }
-    }
-    if (partition?.trim()) {
-        def part_id
-        if (partition.contains("nvme")) {
-          part_id = partition.substring(partition.lastIndexOf("p")+1).replaceAll("[^0-9]+", "")
-        }
-        else {
-          part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
-        }
-        def dev = partition.replaceAll('\\d+$', "")
-        runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
-    }
-    return
-}
-
-def runCephCommand(master, target, cmd) {
-    return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
-    // wait for healthy cluster
-    while (count<attempts) {
-        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        }
-        count++
-        sleep(10)
-    }
-}
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
 
@@ -94,7 +32,7 @@
         if (flags.size() > 0) {
             stage('Set cluster flags') {
                 for (flag in flags) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                 }
             }
         }
@@ -105,7 +43,7 @@
         salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
         def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
 
-        if(cephGrain['return'].isEmpty()){
+        if (cephGrain['return'].isEmpty()) {
             throw new Exception("Ceph salt grain cannot be found!")
         }
         common.print(cephGrain)
@@ -124,70 +62,50 @@
 
         // wait for healthy cluster
         if (WAIT_FOR_HEALTHY.toBoolean()) {
-            waitForHealthy(pepperEnv)
+            ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
         }
 
         // `ceph osd out <id> <id>`
         stage('Set OSDs out') {
-            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+            salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
         }
 
         // wait for healthy cluster
         if (WAIT_FOR_HEALTHY.toBoolean()) {
             sleep(5)
-            waitForHealthy(pepperEnv)
+            ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
         }
 
         // stop osd daemons
         stage('Stop OSD daemons') {
             for (i in osd_ids) {
-                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
             }
         }
 
         // `ceph osd crush remove osd.2`
         stage('Remove OSDs from CRUSH') {
             for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+                salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
             }
         }
 
         // remove keyring `ceph auth del osd.3`
         stage('Remove OSD keyrings from auth') {
             for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+                salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
             }
         }
 
         // remove osd `ceph osd rm osd.3`
         stage('Remove OSDs') {
             for (i in osd_ids) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+                salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
             }
         }
 
         for (osd_id in osd_ids) {
             id = osd_id.replaceAll('osd.', '')
-            /*
-
-            def dmcrypt = ""
-            try {
-                dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
-            } catch (Exception e) {
-                common.warningMsg(e)
-            }
-
-            if (dmcrypt?.trim()) {
-                mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
-                dev = mount.split()[0].replaceAll("[0-9]","")
-
-                // remove partition tables
-                stage("dd part table on ${dev}") {
-                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
-                }
-
-            }
-            */
 
             // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
             stage('Remove journal / block_db / block_wal partition') {
@@ -196,35 +114,35 @@
                 def block_db_partition_uuid = ""
                 def block_wal_partition_uuid = ""
                 try {
-                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
+                    journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
                 try {
-                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
+                    block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
 
                 try {
-                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
+                    block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
 
                 // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                 if (journal_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, journal_partition_uuid)
+                    ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
                 }
                 if (block_db_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, block_db_partition_uuid)
+                    ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
                 }
                 if (block_wal_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+                    ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
                 }
 
                 try {
-                    runCephCommand(pepperEnv, HOST, "partprobe")
+                    salt.cmdRun(pepperEnv, HOST, "partprobe")
                 } catch (Exception e) {
                     common.warningMsg(e)
                 }
@@ -236,13 +154,13 @@
                 def block_partition_uuid = ""
                 def lockbox_partition_uuid = ""
                 try {
-                    data_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+                    data_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
                     common.print(data_partition_uuid)
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
                 try {
-                    block_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+                    block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
@@ -255,13 +173,13 @@
 
                 // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                 if (block_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, block_partition_uuid)
+                    ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
                 }
                 if (data_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+                    ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
                 }
                 if (lockbox_partition_uuid?.trim()) {
-                    removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
+                    ceph.removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
                 }
             }
         }
@@ -270,7 +188,7 @@
             stage('Unset cluster flags') {
                 for (flag in flags) {
                     common.infoMsg('Removing flag ' + flag)
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                 }
             }
         }
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index a50c253..dd34973 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -25,41 +25,18 @@
 common = new com.mirantis.mk.Common()
 salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
+ceph = new com.mirantis.mk.Ceph()
 
 def pepperEnv = "pepperEnv"
-def flags = CLUSTER_FLAGS.tokenize(',')
+flags = CLUSTER_FLAGS.tokenize(',')
 
-def runCephCommand(master, target, cmd) {
-    return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, flags, count=0, attempts=300) {
-    // wait for healthy cluster
-    while (count<attempts) {
-        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        } else {
-          for (flag in flags) {
-            if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
-              common.infoMsg('Cluster is healthy')
-              return;
-            }
-          }
-        }
-        count++
-        sleep(10)
-    }
-}
-
-def backup(master, flags, target) {
+def backup(master, target) {
     stage("backup ${target}") {
 
         if (target == 'osd') {
             try {
                 salt.enforceState(master, "I@ceph:${target}", "ceph.backup", true)
-                runCephCommand(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
+                salt.cmdRun(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
             } catch (Exception e) {
                 common.errorMsg(e)
                 common.errorMsg("Make sure Ceph backup on OSD nodes is enabled")
@@ -79,7 +56,7 @@
                 def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
                 def minionProvider = provider_pillar['return'][0].values()[0]
 
-                waitForHealthy(master, flags)
+                ceph.waitForHealthy(master, ADMIN_HOST, flags)
                 try {
                     salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                 } catch (Exception e) {
@@ -96,14 +73,14 @@
                     common.warningMsg(e)
                 }
                 salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
-                waitForHealthy(master, flags)
+                ceph.waitForHealthy(master, ADMIN_HOST, flags)
             }
         }
     }
     return
 }
 
-def upgrade(master, target, flags) {
+def upgrade(master, target) {
 
     stage("Change ${target} repos") {
         salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
@@ -116,7 +93,7 @@
     }
     if (target == 'common') {
         stage('Upgrade ceph-common pkgs') {
-            runCephCommand(master, "I@ceph:${target}", "apt install ceph-${target} -y")
+            salt.cmdRun(master, "I@ceph:${target}", "apt install ceph-${target} -y")
         }
     } else {
         minions = salt.getMinions(master, "I@ceph:${target}")
@@ -125,30 +102,30 @@
             // upgrade pkgs
             if (target == 'radosgw') {
                 stage('Upgrade radosgw pkgs') {
-                    runCephCommand(master, "I@ceph:${target}", "apt install ${target} -y ")
+                    salt.cmdRun(master, "I@ceph:${target}", "apt install ${target} -y ")
                 }
             } else {
                 stage("Upgrade ${target} pkgs on ${minion}") {
-                    runCephCommand(master, "${minion}", "apt install ceph-${target} -y")
+                    salt.cmdRun(master, "${minion}", "apt install ceph-${target} -y")
                 }
             }
             // restart services
             stage("Restart ${target} services on ${minion}") {
                 if (target == 'osd') {
-                  def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
-                  osds[0].values()[0].values()[0].each { osd,param ->
-                    runCephCommand(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
-                    waitForHealthy(master, flags)
-                  }
+                    def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
+                    osds[0].values()[0].values()[0].each { osd, param ->
+                        salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
+                        ceph.waitForHealthy(master, ADMIN_HOST, flags)
+                    }
                 } else {
-                  runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
-                  waitForHealthy(master, flags)
+                    salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}.target")
+                    ceph.waitForHealthy(master, ADMIN_HOST, flags)
                 }
             }
 
             stage("Verify services for ${minion}") {
                 sleep(10)
-                runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
+                salt.cmdRun(master, "${minion}", "systemctl status ceph-${target}.target")
             }
 
             stage('Ask for manual confirmation') {
@@ -157,32 +134,33 @@
             }
         }
     }
-    runCephCommand(master, ADMIN_HOST, "ceph versions")
+    salt.cmdRun(master, ADMIN_HOST, "ceph versions")
     sleep(5)
     return
 }
+
 timeout(time: 12, unit: 'HOURS') {
     node("python") {
 
         // create connection to salt master
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-        stage ('Check user choices') {
+        stage('Check user choices') {
             if (STAGE_UPGRADE_RGW.toBoolean() == true) {
                 // if rgw, check if other stuff has required version
                 def mon_ok = true
                 if (STAGE_UPGRADE_MON.toBoolean() == false) {
-                    def mon_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
+                    def mon_v = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
                     mon_ok = mon_v.contains("${TARGET_RELEASE}") && !mon_v.contains("${ORIGIN_RELEASE}")
                 }
                 def mgr_ok = true
                 if (STAGE_UPGRADE_MGR.toBoolean() == false) {
-                    def mgr_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
+                    def mgr_v = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
                     mgr_ok = mgr_v.contains("${TARGET_RELEASE}") && !mgr_v.contains("${ORIGIN_RELEASE}")
                 }
                 def osd_ok = true
                 if (STAGE_UPGRADE_OSD.toBoolean() == false) {
-                    def osd_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
+                    def osd_v = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
                     osd_ok = osd_v.contains("${TARGET_RELEASE}") && !osd_v.contains("${ORIGIN_RELEASE}")
                 }
                 if (!mon_ok || !osd_ok || !mgr_ok) {
@@ -207,29 +185,29 @@
         if (flags.size() > 0) {
             stage('Set cluster flags') {
                 for (flag in flags) {
-                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                 }
             }
         }
 
         if (STAGE_UPGRADE_MON.toBoolean() == true) {
-            upgrade(pepperEnv, 'mon', flags)
+            upgrade(pepperEnv, 'mon')
         }
 
         if (STAGE_UPGRADE_MGR.toBoolean() == true) {
-            upgrade(pepperEnv, 'mgr', flags)
+            upgrade(pepperEnv, 'mgr')
         }
 
         if (STAGE_UPGRADE_OSD.toBoolean() == true) {
-            upgrade(pepperEnv, 'osd', flags)
+            upgrade(pepperEnv, 'osd')
         }
 
         if (STAGE_UPGRADE_RGW.toBoolean() == true) {
-            upgrade(pepperEnv, 'radosgw', flags)
+            upgrade(pepperEnv, 'radosgw')
         }
 
         if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
-            upgrade(pepperEnv, 'common', flags)
+            upgrade(pepperEnv, 'common')
         }
 
         // remove cluster flags
@@ -238,7 +216,7 @@
                 for (flag in flags) {
                     if (!flag.contains('sortbitwise')) {
                         common.infoMsg('Removing flag ' + flag)
-                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+                        salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                     }
 
                 }
@@ -247,14 +225,14 @@
 
         if (STAGE_FINALIZE.toBoolean() == true) {
             stage("Finalize ceph version upgrade") {
-                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
+                salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
                 try {
-                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
                 } catch (Exception e) {
                     common.warningMsg(e)
                 }
                 try {
-                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
+                    salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
                 } catch (Exception e) {
                     common.warningMsg(e)
                 }
@@ -262,8 +240,8 @@
         }
 
         // wait for healthy cluster
-        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
-            waitForHealthy(pepperEnv, flags)
+        if (WAIT_FOR_HEALTHY.toBoolean()) {
+            ceph.waitForHealthy(pepperEnv, ADMIN_HOST, flags)
         }
     }
 }
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 0302d5e..d3591b1 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -7,43 +7,19 @@
  */
 
 pepperEnv = "pepperEnv"
-salt = new com.mirantis.mk.Salt()
-def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
 def python = new com.mirantis.mk.Python()
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
 def packages
 def command
 def commandKwargs
 def selMinions = []
-def check_mon
-
-def runCephCommand(master, target, cmd) {
-    return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, tgt, count = 0, attempts=100) {
-    // wait for healthy cluster
-    common = new com.mirantis.mk.Common()
-    while (count<attempts) {
-        def health = runCephCommand(master, tgt, 'ceph health')['return'][0].values()[0]
-        if (health.contains('HEALTH_OK') || health.contains('HEALTH_WARN noout flag(s) set\n')) {
-            common.infoMsg('Cluster is healthy')
-            break;
-        }
-        count++
-        sleep(10)
-    }
-}
 
 timeout(time: 12, unit: 'HOURS') {
     node() {
         try {
-
             def targets = ["common": "ceph-common", "osd": "ceph-osd", "mon": "ceph-mon",
-                          "mgr":"ceph-mgr", "radosgw": "radosgw"]
+                           "mgr"   : "ceph-mgr", "radosgw": "radosgw"]
 
             stage('Setup virtualenv for Pepper') {
                 python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
@@ -52,13 +28,13 @@
             stage('Apply package upgrades on all nodes') {
 
                 targets.each { key, value ->
-                   // try {
-                        command = "pkg.install"
-                        packages = value
-                        commandKwargs = ['only_upgrade': 'true','force_yes': 'true']
-                        target = "I@ceph:${key}"
-                        out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
-                        salt.printSaltCommandResult(out)
+                    // try {
+                    command = "pkg.install"
+                    packages = value
+                    commandKwargs = ['only_upgrade': 'true', 'force_yes': 'true']
+                    target = "I@ceph:${key}"
+                    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
+                    salt.printSaltCommandResult(out)
                 }
             }
 
@@ -66,13 +42,13 @@
                 selMinions = salt.getMinions(pepperEnv, "I@ceph:mon")
                 for (tgt in selMinions) {
                     // runSaltProcessStep 'service.restart' don't work for this services
-                    runCephCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
-                    waitForHealthy(pepperEnv, tgt)
+                    salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
+                    ceph.waitForHealthy(pepperEnv, tgt)
                 }
                 selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
                 for (tgt in selMinions) {
-                    runCephCommand(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
-                    waitForHealthy(pepperEnv, tgt)
+                    salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
+                    ceph.waitForHealthy(pepperEnv, tgt)
                 }
             }
 
@@ -89,15 +65,15 @@
                         osd_ids.add('osd.' + osd_id)
                     }
 
-                    runCephCommand(pepperEnv, tgt, 'ceph osd set noout')
+                    salt.cmdRun(pepperEnv, tgt, 'ceph osd set noout')
 
                     for (i in osd_ids) {
-                        salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+                        salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
                         // wait for healthy cluster
-                        waitForHealthy(pepperEnv, tgt)
+                        ceph.waitForHealthy(pepperEnv, tgt, ['noout'], 0, 100)
                     }
 
-                    runCephCommand(pepperEnv, tgt, 'ceph osd unset noout')
+                    salt.cmdRun(pepperEnv, tgt, 'ceph osd unset noout')
                 }
             }