Add dmcrypt support to Ceph pipelines

PROD-16319

Change-Id: I77f4993bb9bcc5602d08d3fb343d161409b8976e
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 8ca8d58..f5c99a4 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -47,6 +47,52 @@
     return
 }
 
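+// Looks up the journal / block.db / block.wal partitions referenced from
+// /var/lib/ceph/osd/ceph-<id> on the target and removes any that live on a
+// separate device, leaving the OSD data disk itself untouched.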
+def removeJournalOrBlockPartitions(master, target, id) {
+
+    // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+    stage('Remove journal / block_db / block_wal partition') {
+        def partition_uuid = ""
+        def journal_partition_uuid = ""
+        def block_db_partition_uuid = ""
+        def block_wal_partition_uuid = ""
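+        // each lookup greps the OSD dir listing for a symlink into /dev/disk/by-partuuid
+        // and keeps only the basename of the link target, i.e. the partition uuid;
+        // a missing symlink makes the grep exit non-zero, leaving the variable empty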
+        try {
+            journal_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+            journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+        } catch (Exception e) {
+            common.infoMsg(e)
+        }
+        try {
+            block_db_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+            block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+        } catch (Exception e) {
+            common.infoMsg(e)
+        }
+
+        try {
+            block_wal_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+            block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+        } catch (Exception e) {
+            common.infoMsg(e)
+        }
+
+        // e.g. partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+        if (journal_partition_uuid?.trim()) {
+            partition_uuid = journal_partition_uuid
+        } else if (block_db_partition_uuid?.trim()) {
+            partition_uuid = block_db_partition_uuid
+        }
+
+        // if the OSD keeps its journal, block_db or block_wal on a separate disk, remove that partition
+        if (partition_uuid?.trim()) {
+            removePartition(master, target, partition_uuid)
+        }
+        if (block_wal_partition_uuid?.trim()) {
+            removePartition(master, target, block_wal_partition_uuid)
+        }
+    }
+    return
+}
+
 def runCephCommand(master, target, cmd) {
     return salt.cmdRun(master, target, cmd)
 }
@@ -81,11 +127,12 @@
 
         def target_hosts = salt.getMinions(pepperEnv, TARGET)
 
-        for (HOST in target_hosts) {
+        for (tgt in target_hosts) {
             def osd_ids = []
 
-            // get list of osd disks of the host
-            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+            // sync grains so the ceph_disk grain is current, then read the OSD disks of the target
+            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+            def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
 
             for (i in ceph_disks) {
                 def osd_id = i.getKey().toString()
@@ -121,7 +168,7 @@
 
                     // stop osd daemons
                     stage('Stop OSD daemons') {
-                        salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')],  null, true)
+                        salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')],  null, true)
                     }
 
                     // remove keyring `ceph auth del osd.3`
@@ -134,72 +181,72 @@
                         runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
                     }
 
-                    def mount = runCephCommand(pepperEnv, HOST, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
-                    dev = mount.split()[0].replaceAll("[0-9]","")
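+                    // detect dmcrypt by the presence of dmcrypt files in the OSD data dir;
+                    // the grep exits non-zero for plain OSDs, leaving dmcrypt empty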
+                    def dmcrypt = ""
+                    try {
+                        dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+                    } catch (Exception e) {
+                        common.warningMsg(e)
+                    }
 
-                    // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
-                    stage('Remove journal / block_db / block_wal partition') {
-                        def partition_uuid = ""
-                        def journal_partition_uuid = ""
-                        def block_db_partition_uuid = ""
-                        def block_wal_partition_uuid = ""
-                        try {
-                            journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
-                            journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
-                        } catch (Exception e) {
-                            common.infoMsg(e)
-                        }
-                        try {
-                            block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
-                            block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
-                        } catch (Exception e) {
-                            common.infoMsg(e)
+                    if (dmcrypt?.trim()) {
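+                        // an encrypted OSD is mounted from a /dev/mapper node, so the mount
+                        // table cannot name the raw device; lsblk -rp prints full device paths
+                        // and -B1 includes the backing partition on the line above the match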
+                        def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                        dev = mount.split()[0].replaceAll("[0-9]","")
+
+                        // remove partition tables
+                        stage('dd part tables') {
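+                            // zeroing the first 512 bytes wipes the (protective) MBR;
+                            // the GPT structures are cleared by ceph-disk zap below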
+                            runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
                         }
 
-                        try {
-                            block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
-                            block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
-                        } catch (Exception e) {
-                            common.infoMsg(e)
+                        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                        removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+                        // reboot
+                        stage('reboot and wait') {
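+                            // reboot so the kernel drops the stale dm-crypt mappings
+                            // and re-reads the wiped partition table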
+                            salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
+                            salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+                            sleep(10)
                         }
 
-                        // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
-                        if (journal_partition_uuid?.trim()) {
-                            partition_uuid = journal_partition_uuid
-                        } else if (block_db_partition_uuid?.trim()) {
-                            partition_uuid = block_db_partition_uuid
+                        // zap disks `ceph-disk zap /dev/sdi`
+                        stage('Zap devices') {
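+                            // the first zap of a previously encrypted disk may still fail,
+                            // so it is retried once; a second failure aborts the build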
+                            try {
+                                runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+                            } catch (Exception e) {
+                                common.warningMsg(e)
+                            }
+                            runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
                         }
 
-                        // if disk has journal, block_db or block_wal on different disk, then remove the partition
-                        if (partition_uuid?.trim()) {
-                            removePartition(pepperEnv, HOST, partition_uuid)
+                    } else {
+
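+                        // a plain (non-dmcrypt) OSD is mounted directly from its data
+                        // partition, so the mount table names the device; stripping the
+                        // digits yields the parent disk (assumes /dev/sdX-style names)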
+                        def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+                        dev = mount.split()[0].replaceAll("[0-9]","")
+
+                        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+                        removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+                        // umount `umount /dev/sdi1`
+                        stage('Umount devices') {
+                            runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
                         }
-                        if (block_wal_partition_uuid?.trim()) {
-                            removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+
+                        // zap disks `ceph-disk zap /dev/sdi`
+                        stage('Zap device') {
+                            runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
                         }
                     }
 
-                    // umount `umount /dev/sdi1`
-                    stage('Umount devices') {
-                        runCephCommand(pepperEnv, HOST, "umount /var/lib/ceph/osd/ceph-${id}")
-                    }
-
-                    // zap disks `ceph-disk zap /dev/sdi`
-                    stage('Zap device') {
-                        runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
-                    }
-
-                    // Deploy failed Ceph OSD
+                    // Deploy Ceph OSD
                     stage('Deploy Ceph OSD') {
-                        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_pillar', [], null, true, 5)
-                        salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
+                        salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
+                        salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
                     }
 
                     if (PER_OSD_CONTROL.toBoolean() == true) {
                         stage("Verify backend version for osd.${id}") {
                             sleep(5)
-                            runCephCommand(pepperEnv, HOST, "ceph osd metadata ${id} | grep osd_objectstore")
-                            runCephCommand(pepperEnv, HOST, "ceph -s")
+                            runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+                            runCephCommand(pepperEnv, tgt, "ceph -s")
                         }
 
                         stage('Ask for manual confirmation') {
@@ -211,8 +258,8 @@
             if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
                 stage("Verify backend versions") {
                     sleep(5)
-                    runCephCommand(pepperEnv, HOST, "ceph osd metadata | grep osd_objectstore -B2")
-                    runCephCommand(pepperEnv, HOST, "ceph -s")
+                    runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+                    runCephCommand(pepperEnv, tgt, "ceph -s")
                 }
 
                 stage('Ask for manual confirmation') {
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index 3a38471..6387b20 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -20,6 +20,25 @@
 
 def pepperEnv = "pepperEnv"
 
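+// Resolves the /dev node of the partition carrying the given partuuid via blkid,
+// then deletes that partition number from its parent disk with parted.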
+def removePartition(master, target, partition_uuid) {
+    def partition = ""
+    try {
+        // partition = /dev/sdi2
+        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
+    } catch (Exception e) {
+        common.warningMsg(e)
+    }
+
+    if (partition?.trim()) {
+        // dev = /dev/sdi
+        def dev = partition.replaceAll('\\d+$', "")
+        // part_id = 2
+        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
+    }
+    return
+}
+
 def runCephCommand(master, target, cmd) {
     return salt.cmdRun(master, target, cmd)
 }
@@ -62,6 +81,7 @@
     //  split minion id on '.' and remove '*'
     def target = HOST.split("\\.")[0].replace("*", "")
 
+    salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.sync_grains', [], null, true, 5)
     def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
     domain = _pillar['return'][0].values()[0].values()[0]
 
@@ -89,6 +109,7 @@
         def osd_ids = []
 
         // get list of osd disks of the host
+        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
         def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
 
         for (i in ceph_disks) {
@@ -136,6 +157,69 @@
             }
         }
 
+        for (osd_id in osd_ids) {
+
+            id = osd_id.replaceAll('osd.', '')
+            def dmcrypt = ""
+            try {
+                dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+            } catch (Exception e) {
+                common.warningMsg(e)
+            }
+
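+            // encrypted OSDs additionally need their partition table wiped here, since
+            // the device cannot be cleanly reused while the dmcrypt metadata survives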
+            if (dmcrypt?.trim()) {
+                mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+                dev = mount.split()[0].replaceAll("[0-9]","")
+
+                // remove partition tables
+                stage("dd part table on ${dev}") {
+                    runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+                }
+
+            }
+            // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+            stage('Remove journal / block_db / block_wal partition') {
+                def partition_uuid = ""
+                def journal_partition_uuid = ""
+                def block_db_partition_uuid = ""
+                def block_wal_partition_uuid = ""
+                try {
+                    journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+                    journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+                try {
+                    block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+                    block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                try {
+                    block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+                    block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                } catch (Exception e) {
+                    common.infoMsg(e)
+                }
+
+                // e.g. partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+                if (journal_partition_uuid?.trim()) {
+                    partition_uuid = journal_partition_uuid
+                } else if (block_db_partition_uuid?.trim()) {
+                    partition_uuid = block_db_partition_uuid
+                }
+
+                // if the OSD keeps its journal, block_db or block_wal on a separate disk, remove that partition
+                if (partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, partition_uuid)
+                }
+                if (block_wal_partition_uuid?.trim()) {
+                    removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+                }
+            }
+        }
+
         // purge Ceph pkgs
         stage('Purge Ceph OSD pkgs') {
             runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index c51292e..8483f3a 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -22,8 +22,27 @@
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
-def runCephCommand(master, cmd) {
-    return salt.cmdRun(master, ADMIN_HOST, cmd)
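+// Same helper as in ceph-remove-node.groovy: resolve the partition carrying the
+// given partuuid via blkid and remove it from its parent disk with parted.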
+def removePartition(master, target, partition_uuid) {
+    def partition = ""
+    try {
+        // partition = /dev/sdi2
+        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
+    } catch (Exception e) {
+        common.warningMsg(e)
+    }
+
+    if (partition?.trim()) {
+        // dev = /dev/sdi
+        def dev = partition.replaceAll('\\d+$', "")
+        // part_id = 2
+        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
+    }
+    return
+}
+
+def runCephCommand(master, target, cmd) {
+    return salt.cmdRun(master, target, cmd)
 }
 
 def waitForHealthy(master, count=0, attempts=300) {
@@ -47,17 +66,15 @@
     if (flags.size() > 0) {
         stage('Set cluster flags') {
             for (flag in flags) {
-                runCephCommand(pepperEnv, 'ceph osd set ' + flag)
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
             }
         }
     }
 
     def osd_ids = []
 
-    print("osds:")
-    print(osds)
-
     // get list of osd disks of the host
+    salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
     def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
     common.prettyPrint(ceph_disks)
 
@@ -71,9 +88,14 @@
         }
     }
 
+    // wait for healthy cluster
+    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+        waitForHealthy(pepperEnv)
+    }
+
     // `ceph osd out <id> <id>`
     stage('Set OSDs out') {
-        runCephCommand(pepperEnv, 'ceph osd out ' + osd_ids.join(' '))
+        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
     }
 
     // wait for healthy cluster
@@ -92,30 +114,92 @@
     // `ceph osd crush remove osd.2`
     stage('Remove OSDs from CRUSH') {
         for (i in osd_ids) {
-            runCephCommand(pepperEnv, 'ceph osd crush remove ' + i)
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
         }
     }
 
     // remove keyring `ceph auth del osd.3`
     stage('Remove OSD keyrings from auth') {
         for (i in osd_ids) {
-            runCephCommand(pepperEnv, 'ceph auth del ' + i)
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
         }
     }
 
     // remove osd `ceph osd rm osd.3`
     stage('Remove OSDs') {
         for (i in osd_ids) {
-            runCephCommand(pepperEnv, 'ceph osd rm ' + i)
+            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
         }
     }
 
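+    // clean up the disks behind the removed OSDs: wipe the partition table of
+    // encrypted devices and drop any external journal / block.db / block.wal partitions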
+    for (osd_id in osd_ids) {
+
+        id = osd_id.replaceAll('osd.', '')
+        def dmcrypt = ""
+        try {
+            dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+        } catch (Exception e) {
+            common.warningMsg(e)
+        }
+
+        if (dmcrypt?.trim()) {
+            mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+            dev = mount.split()[0].replaceAll("[0-9]","")
+
+            // remove partition tables
+            stage("dd part table on ${dev}") {
+                runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+            }
+
+        }
+        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+        stage('Remove journal / block_db / block_wal partition') {
+            def partition_uuid = ""
+            def journal_partition_uuid = ""
+            def block_db_partition_uuid = ""
+            def block_wal_partition_uuid = ""
+            try {
+                journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+                journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+            } catch (Exception e) {
+                common.infoMsg(e)
+            }
+            try {
+                block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+                block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+            } catch (Exception e) {
+                common.infoMsg(e)
+            }
+
+            try {
+                block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+                block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+            } catch (Exception e) {
+                common.infoMsg(e)
+            }
+
+            // e.g. partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+            if (journal_partition_uuid?.trim()) {
+                partition_uuid = journal_partition_uuid
+            } else if (block_db_partition_uuid?.trim()) {
+                partition_uuid = block_db_partition_uuid
+            }
+
+            // if the OSD keeps its journal, block_db or block_wal on a separate disk, remove that partition
+            if (partition_uuid?.trim()) {
+                removePartition(pepperEnv, HOST, partition_uuid)
+            }
+            if (block_wal_partition_uuid?.trim()) {
+                removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+            }
+        }
+    }
     // remove cluster flags
     if (flags.size() > 0) {
         stage('Unset cluster flags') {
             for (flag in flags) {
                 common.infoMsg('Removing flag ' + flag)
-                runCephCommand(pepperEnv, 'ceph osd unset ' + flag)
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
             }
         }
     }
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 086f9aa..0a27dc5 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -13,6 +13,7 @@
  *  JOURNAL_BLOCKDB_BLOCKWAL_PARTITION  Comma separated list of partitions where journal or block_db or block_wal for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
  *  CLUSTER_FLAGS                       Comma separated list of tags to apply to cluster
 *  WAIT_FOR_HEALTHY                    Wait for cluster rebalance before stopping daemons
+ *  DMCRYPT                             Set to True if the OSDs being replaced are/were dmcrypt-encrypted
  *
  */
 
@@ -49,31 +50,11 @@
     // create connection to salt master
     python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-    if (flags.size() > 0) {
-        stage('Set cluster flags') {
-            for (flag in flags) {
-                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
-            }
-        }
-    }
-
     def osd_ids = []
 
-    print("osds:")
-    print(osds)
-
-    // get list of osd disks of the host
-    def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
-    common.prettyPrint(ceph_disks)
-
-    for (i in ceph_disks) {
-        def osd_id = i.getKey().toString()
-        if (osd_id in osds || OSD == '*') {
-            osd_ids.add('osd.' + osd_id)
-            print("Will delete " + osd_id)
-        } else {
-            print("Skipping " + osd_id)
-        }
+    for (osd_id in osds) {
+        osd_ids.add('osd.' + osd_id)
+        print("Will delete " + osd_id)
     }
 
     // `ceph osd out <id> <id>`
@@ -87,6 +68,15 @@
         waitForHealthy(pepperEnv)
     }
 
+    if (flags.size() > 0) {
+        stage('Set cluster flags') {
+            for (flag in flags) {
+                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+            }
+        }
+    }
+
     // stop osd daemons
     stage('Stop OSD daemons') {
         for (i in osd_ids) {
@@ -129,29 +119,83 @@
         }
     }
 
-    // umount `umount /dev/sdi1`
-    stage('Umount devices') {
-        for (dev in devices) {
-            runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
-        }
-    }
+    if (DMCRYPT.toBoolean() == true) {
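+        // an encrypted device cannot simply be unmounted and zapped while dm-crypt
+        // still maps it: wipe the partition table, drop the external journal/db/wal
+        // partitions, then reboot to release the mappings before zapping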
 
-    // zap disks `ceph-disk zap /dev/sdi`
-    stage('Zap devices') {
-        for (dev in devices) {
-            runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+        // remove partition tables
+        stage('dd part tables') {
+            for (dev in devices) {
+                runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+            }
         }
-    }
 
-    // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
-    stage('Remove journal / block_db / block_wal partitions') {
-        for (partition in journals_blockdbs_blockwals) {
-            if (partition?.trim()) {
-                // dev = /dev/sdi
-                def dev = partition.replaceAll("[0-9]", "")
-                // part_id = 2
-                def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-                runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
+        // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
+        stage('Remove journal / block_db / block_wal partitions') {
+            for (partition in journals_blockdbs_blockwals) {
+                if (partition?.trim()) {
+                    // dev = /dev/sdi
+                    def dev = partition.replaceAll("[0-9]", "")
+                    // part_id = 2
+                    def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
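+                    // parted can prompt interactively (Ignore/Cancel) on a busy disk; the
+                    // piped "Ignore" is presumably meant to answer that prompt non-interactively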
+                    try {
+                        runCephCommand(pepperEnv, HOST, "echo Ignore | parted ${dev} rm ${part_id}")
+                    } catch (Exception e) {
+                        common.warningMsg(e)
+                    }
+                }
+            }
+        }
+
+        // reboot
+        stage('reboot and wait') {
+            salt.runSaltProcessStep(pepperEnv, HOST, 'system.reboot', null, null, true, 5)
+            salt.minionsReachable(pepperEnv, 'I@salt:master', HOST)
+            sleep(10)
+        }
+
+        // zap disks `ceph-disk zap /dev/sdi`
+        stage('Zap devices') {
+            for (dev in devices) {
+                try {
+                    runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+                } catch (Exception e) {
+                    common.warningMsg(e)
+                }
+                runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+            }
+        }
+
+    } else {
+
+        // umount `umount /dev/sdi1`
+        stage('Umount devices') {
+            for (dev in devices) {
+                runCephCommand(pepperEnv, HOST, 'umount ' + dev + '1')
+            }
+        }
+
+        // zap disks `ceph-disk zap /dev/sdi`
+        stage('Zap devices') {
+            for (dev in devices) {
+                runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
+            }
+        }
+
+        // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
+        stage('Remove journal / block_db / block_wal partitions') {
+            for (partition in journals_blockdbs_blockwals) {
+                if (partition?.trim()) {
+                    // dev = /dev/sdi
+                    def dev = partition.replaceAll("[0-9]", "")
+                    // part_id = 2
+                    def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                    try {
+                        runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
+                    } catch (Exception e) {
+                        common.warningMsg(e)
+                    }
+                }
             }
         }
     }