Replace hardcoded "ceph_disk" literal with a variable based on the lvm_enabled pillar
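
The grain that lists the OSDs differs between deployment types:
ceph-volume based deployments (ceph:osd:lvm_enabled set to true)
expose it as "ceph_volume", while classic ceph-disk deployments
expose it as "ceph_disk", so the grain name is now derived from the
lvm_enabled pillar instead of being hardcoded.

Rough illustrative Groovy sketch of the selection and lookup; the
sample salt return structures, hostname and device paths below are
assumptions, not data taken from a real environment:

    // Hypothetical salt returns, shaped like the parsed JSON the
    // pipeline gets back from getPillar/getGrain.
    def pillar = ['return': [['ceph-osd01.local': true]]]
    def grain  = ['return': [['ceph-osd01.local': ['ceph': [
        'ceph_volume': ['0': ['block': '/dev/vdb'], '1': ['block': '/dev/vdc']]
    ]]]]]

    // lvm_enabled: true -> "ceph_volume", otherwise -> "ceph_disk".
    def deviceGrainName = pillar['return'].first().containsValue(true) ? 'ceph_volume' : 'ceph_disk'
    def cephDisks = grain['return'][0].values()[0].values()[0][deviceGrainName]

    // Iterate the OSD ids, as the restart loop in the pipeline does.
    cephDisks.each { osd, param ->
        println "would restart ceph-osd@${osd}"
    }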

Change-Id: I479ab9832961c4c5e012e661bdd06604ecfed638
Related-Prod: PROD-34370
(cherry picked from commit 12955c759f0f53b00c76af2e2798433e7705582b)
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 297feaf..548b49d 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -112,8 +112,9 @@
             // restart services
             stage("Restart ${target} services on ${minion}") {
                 if (target == 'osd') {
-                    def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
-                    osds[0].values()[0].values()[0].each { osd, param ->
+                    def device_grain_name = salt.getPillar(master, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+                    def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+                    ceph_disks.each { osd, param ->
                         salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
                         ceph.waitForHealthy(master, ADMIN_HOST, flags)
                     }