ceph: add support for removing block_wal partitions

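Factor the journal / block_db partition removal in
ceph-backend-migration.groovy into a removePartition() helper, call it
for block.wal partitions as well, and rename the
JOURNAL_OR_BLOCKDB_PARTITION parameter of ceph-replace-failed-osd.groovy
to JOURNAL_BLOCKDB_BLOCKWAL_PARTITION so block_wal partitions can be
passed there too.

The helper derives the device node and partition number from blkid
output. A minimal sketch of that string handling, with an illustrative
blkid line (device name and UUID taken from the comments in the helper):

    // blkid prints lines like: /dev/sdi2: PARTUUID="2c76f144-..."
    def line = '/dev/sdi2: PARTUUID="2c76f144-f412-481e-b150-4046212ca932"'
    def partition = line.split("(?<=[0-9])")[0]          // "/dev/sdi2"
    def dev = partition.replaceAll('\\d+$', '')          // "/dev/sdi"
    def part_id = partition.substring(partition.lastIndexOf('/') + 1)
                           .replaceAll('[^0-9]', '')     // "2"
    assert dev == '/dev/sdi' && part_id == '2'
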
Change-Id: I3b5784f8ffa110c46228a31aca764cc4b7de62fa
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 634cdf8..8ca8d58 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -28,6 +28,27 @@
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 
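+// Find the partition that carries partition_uuid (via blkid) and remove it
+// with parted; a failed blkid lookup is only logged and the removal skipped.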
+def removePartition(master, target, partition_uuid) {
+    def partition = ""
+    try {
+        // partition = /dev/sdi2
+        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
+    } catch (Exception e) {
+        common.warningMsg(e)
+    }
+
+    if (partition?.trim()) {
+        // dev = /dev/sdi
+        def dev = partition.replaceAll('\\d+$', "")
+        // part_id = 2
+        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
+    }
+    return
+}
+
 def runCephCommand(master, target, cmd) {
     return salt.cmdRun(master, target, cmd)
 }
@@ -83,7 +104,7 @@
                 def id = osd_id.replaceAll('osd.', '')
                 def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
 
-                if (backend.contains(ORIGIN_BACKEND)) {
+                if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
 
                     // wait for healthy cluster before manipulating osds
                     if (WAIT_FOR_HEALTHY.toBoolean() == true) {
@@ -118,11 +139,12 @@
                     def mount = runCephCommand(pepperEnv, HOST, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
                     dev = mount.split()[0].replaceAll("[0-9]","")
 
-                    // remove journal or block_db partition `parted /dev/sdj rm 3`
-                    stage('Remove journal / block_db partition') {
+                    // remove journal, block_db or block_wal partition, e.g. `parted /dev/sdj rm 3`
+                    stage('Remove journal / block_db / block_wal partition') {
                         def partition_uuid = ""
                         def journal_partition_uuid = ""
                         def block_db_partition_uuid = ""
+                        def block_wal_partition_uuid = ""
                         try {
                             journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
                             journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
@@ -136,6 +158,14 @@
                             common.infoMsg(e)
                         }
 
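+                        // a block.wal symlink is only present when the OSD was deployed with a separate WAL partition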
+                        try {
+                            block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+                            block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+                        } catch (Exception e) {
+                            common.infoMsg(e)
+                        }
+
                         // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                         if (journal_partition_uuid?.trim()) {
                             partition_uuid = journal_partition_uuid
@@ -143,23 +173,12 @@
                             partition_uuid = block_db_partition_uuid
                         }
 
-                        // if failed disk had block_db or journal on different disk, then remove the partition
+                        // if the disk had its journal, block_db or block_wal on a different disk, remove that partition
                         if (partition_uuid?.trim()) {
-                            def partition = ""
-                            try {
-                                // partition = /dev/sdi2
-                                partition = runCephCommand(pepperEnv, HOST, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
-                            } catch (Exception e) {
-                                common.warningMsg(e)
-                            }
-
-                            if (partition?.trim()) {
-                                // dev = /dev/sdi
-                                def dev = partition.replaceAll('\\d+$', "")
-                                // part_id = 2
-                                def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
-                                runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
-                            }
+                            removePartition(pepperEnv, HOST, partition_uuid)
+                        }
+                        if (block_wal_partition_uuid?.trim()) {
+                            removePartition(pepperEnv, HOST, block_wal_partition_uuid)
                         }
                     }
 
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index 9127581..086f9aa 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -3,16 +3,16 @@
  * Replace failed disk with a new disk
  *
  * Required parameters:
- *  SALT_MASTER_URL                 URL of Salt master
- *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *  SALT_MASTER_URL                     URL of Salt master
+ *  SALT_MASTER_CREDENTIALS             Credentials to the Salt API
  *
- *  HOST                            Host (minion id) to be removed
- *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
- *  OSD                             Failed OSD ids to be replaced (comma-separated list - 1,2,3)
- *  DEVICE                          Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
- *  JOURNAL_OR_BLOCKDB_PARTITION    Comma separated list of partitions where journal or block_db for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
- *  CLUSTER_FLAGS                   Comma separated list of tags to apply to cluster
- *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stoping daemons
+ *  HOST                                Host (minion id) to be removed
+ *  ADMIN_HOST                          Host (minion id) with admin keyring and /etc/crushmap file present
+ *  OSD                                 Failed OSD ids to be replaced (comma-separated list - 1,2,3)
+ *  DEVICE                              Comma separated list of failed devices that will be replaced at HOST (/dev/sdb,/dev/sdc)
+ *  JOURNAL_BLOCKDB_BLOCKWAL_PARTITION  Comma separated list of partitions where journal, block_db or block_wal for the failed devices on this HOST were stored (/dev/sdh2,/dev/sdh3)
+ *  CLUSTER_FLAGS                       Comma separated list of flags to apply to cluster
+ *  WAIT_FOR_HEALTHY                    Wait for cluster rebalance before stopping daemons
  *
  */
 
@@ -24,7 +24,7 @@
 def flags = CLUSTER_FLAGS.tokenize(',')
 def osds = OSD.tokenize(',')
 def devices = DEVICE.tokenize(',')
-def journals_blockdbs = JOURNAL_OR_BLOCKDB_PARTITION.tokenize(',')
+def journals_blockdbs_blockwals = JOURNAL_BLOCKDB_BLOCKWAL_PARTITION.tokenize(',')
 
 
 def runCephCommand(master, target, cmd) {
@@ -143,14 +143,14 @@
         }
     }
 
-    // remove journal or block_db partition `parted /dev/sdj rm 3`
-    stage('Remove journal / block_db partitions') {
-        for (journal_blockdb in journals_blockdbs) {
-            if (journal_blockdb?.trim()) {
+    // remove journal, block_db or block_wal partition `parted /dev/sdj rm 3`
+    stage('Remove journal / block_db / block_wal partitions') {
+        for (partition in journals_blockdbs_blockwals) {
+            if (partition?.trim()) {
                 // dev = /dev/sdi
-                def dev = journal_blockdb.replaceAll("[0-9]", "")
+                def dev = partition.replaceAll("[0-9]", "")
                 // part_id = 2
-                def part_id = journal_blockdb.substring(journal_blockdb.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
+                def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
                 runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
             }
         }