Replace hardcoded "ceph_disk" grain key with a pillar-driven variable

Read the ceph:osd:lvm_enabled pillar and look up OSD devices under the
"ceph_volume" grain when LVM is enabled, falling back to "ceph_disk"
otherwise. In ceph-remove-osd.groovy, additionally resolve the data
partition UUID from the LVM logical volume whose name contains the OSD
fsid when LVM is in use.

Change-Id: I479ab9832961c4c5e012e661bdd06604ecfed638
Related-Prod: PROD-34370
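
For reference, the device-grain lookup pattern introduced across the
pipelines below boils down to the following sketch. It is illustrative
only: `salt`, `pepperEnv` and `tgt` stand in for the com.mirantis.mk.Salt
wrapper, the Salt connection name and the target minion that each
pipeline already has in scope.

    // Minions deployed with ceph-volume (ceph:osd:lvm_enabled == true) publish
    // their OSD device map under the "ceph_volume" grain; legacy ceph-disk
    // deployments keep publishing "ceph_disk".
    def lvm_enabled = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true)
    def device_grain_name = lvm_enabled ? "ceph_volume" : "ceph_disk"

    // The per-minion map of {osd_id: device parameters} is then read from the
    // "ceph" grain under whichever key was selected above.
    def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
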
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 676c236..a9bf720 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -91,13 +91,13 @@
             }
 
             def target_hosts = salt.getMinions(pepperEnv, TARGET)
-
+            def device_grain_name = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
             for (tgt in target_hosts) {
                 def osd_ids = []
 
                 // get list of osd disks of the tgt
                 salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-                def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+                def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
 
                 for (i in ceph_disks) {
                     def osd_id = i.getKey().toString()
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e1d6ce8..39ed07e 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -75,10 +75,10 @@
             }
         } else if (HOST_TYPE.toLowerCase() == 'osd') {
             def osd_ids = []
-
+            def device_grain_name = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
             // get list of osd disks of the host
             salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
-            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
 
             for (i in ceph_disks) {
                 def osd_id = i.getKey().toString()
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 0a045c3..a7d4e15 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -47,7 +47,8 @@
             throw new Exception("Ceph salt grain cannot be found!")
         }
         common.print(cephGrain)
-        def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
+        def device_grain_name = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+        def ceph_disks = cephGrain['return'][0].values()[0].values()[0][device_grain_name]
         common.prettyPrint(ceph_disks)
 
         for (i in ceph_disks) {
@@ -153,9 +154,20 @@
                 def data_partition_uuid = ""
                 def block_partition_uuid = ""
                 def lockbox_partition_uuid = ""
+                def osd_fsid = ""
+                def lvm = ""
+                def lvm_enabled = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true)
                 try {
-                    data_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
-                    common.print(data_partition_uuid)
+                    osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+                    if (lvm_enabled) {
+                        lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+                        lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+                        lvm["local"].each { lv, params ->
+                            if (params["Logical Volume Name"].contains(osd_fsid)) {
+                                data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+                            }
+                        }
+                    }
                 } catch (Exception e) {
                     common.infoMsg(e)
                 }
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index dd34973..2df4680 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -112,8 +112,9 @@
             // restart services
             stage("Restart ${target} services on ${minion}") {
                 if (target == 'osd') {
-                    def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
-                    osds[0].values()[0].values()[0].each { osd, param ->
+                    def device_grain_name = salt.getPillar(master, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+                    def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+                    ceph_disks.each { osd, param ->
                         salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
                         ceph.waitForHealthy(master, ADMIN_HOST, flags)
                     }
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 9945d33..f45e4ec 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -897,6 +897,7 @@
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
     def targetHosts = salt.getMinionsSorted(pepperEnv, target)
+    def device_grain_name = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
     for (t in targetHosts) {
         def osd_ids = []
         // get list of osd disks of the host
@@ -906,7 +907,7 @@
             throw new Exception("Ceph salt grain cannot be found!")
         }
         common.print(cephGrain)
-        def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
+        def ceph_disks = cephGrain['return'][0].values()[0].values()[0][device_grain_name]
         for (i in ceph_disks) {
             def osd_id = i.getKey().toString()
             osd_ids.add('osd.' + osd_id)
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 7cf9242..e5c7c8e 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -54,11 +54,11 @@
             }
 
             stage('Restart OSDs') {
-
+                def device_grain_name = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
                 selMinions = salt.getMinions(pepperEnv, "I@ceph:osd")
                 for (tgt in selMinions) {
                     salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
-                    def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+                    def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
 
                     def osd_ids = []
                     for (i in ceph_disks) {