Unset flags even if the pipeline fails with an exception
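
All three pipelines now wrap the OSD work in try/finally so the cluster
flags are cleared even when a stage throws. As a minimal sketch of that
pattern, reusing the helpers already present in these pipelines
(salt.cmdRun, pepperEnv, ADMIN_HOST; the flag list below is illustrative
only, not taken from the jobs):

    def flags = ['noout', 'norebalance']   // example flags only
    // set the flags before touching any OSD
    for (flag in flags) {
        salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
    }
    try {
        // ... migrate or remove the OSDs ...
    } finally {
        // runs on success, failure or abort, so the cluster is never
        // left with stale flags set
        for (flag in flags) {
            salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
        }
    }
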
Related-Prod: PROD-35064
Change-Id: I8e037852f93cdcb157b3ef77d2a58dd1de3bbe5b
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index f873534..52ccf1e 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -128,10 +128,8 @@
ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
}
}
- catch (Throwable e) {
- // There was an error or exception thrown. Unset norebalance.
+ finally {
runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
- throw e
}
}
}
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 22809c8..4ee9f17 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -92,153 +92,157 @@
def target_hosts = salt.getMinions(pepperEnv, TARGET)
def device_grain_name = "ceph_disk"
- for (tgt in target_hosts) {
- def osd_ids = []
+ try {
+ for (tgt in target_hosts) {
+ def osd_ids = []
- // get list of osd disks of the tgt
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
- def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+ // get list of osd disks of the tgt
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+ def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- if (osd_id in osds || OSD == '*') {
- osd_ids.add('osd.' + osd_id)
- print("Will migrate " + osd_id)
- } else {
- print("Skipping " + osd_id)
- }
- }
-
- for (osd_id in osd_ids) {
-
- def id = osd_id.replaceAll('osd.', '')
- def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
-
- if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
-
- // wait for healthy cluster before manipulating with osds
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // `ceph osd out <id> <id>`
- stage('Set OSDs out') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
- }
-
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- sleep(5)
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // stop osd daemons
- stage('Stop OSD daemons') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
- }
-
- // remove keyring `ceph auth del osd.3`
- stage('Remove OSD keyrings from auth') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
- }
-
- // remove osd `ceph osd rm osd.3`
- stage('Remove OSDs') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
- }
-
- def dmcrypt = ""
- try {
- dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (dmcrypt?.trim()) {
- def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
-
- // remove partition tables
- stage('dd part tables') {
- salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
- }
-
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- removeJournalOrBlockPartitions(pepperEnv, tgt, id)
-
- // reboot
- stage('reboot and wait') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
- salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
- sleep(10)
- }
-
- // zap disks `ceph-disk zap /dev/sdi`
- stage('Zap devices') {
- try {
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
- }
-
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ if (osd_id in osds || OSD == '*') {
+ osd_ids.add('osd.' + osd_id)
+ print("Will migrate " + osd_id)
} else {
+ print("Skipping " + osd_id)
+ }
+ }
- def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
+ for (osd_id in osd_ids) {
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+ def id = osd_id.replaceAll('osd.', '')
+ def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
- // umount `umount /dev/sdi1`
- stage('Umount devices') {
- salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+
+ // wait for healthy cluster before manipulating with osds
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
- // zap disks `ceph-disk zap /dev/sdi`
- stage('Zap device') {
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ // `ceph osd out <id> <id>`
+ stage('Set OSDs out') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
}
- }
- // Deploy Ceph OSD
- stage('Deploy Ceph OSD') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
- salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
- }
-
- if (PER_OSD_CONTROL.toBoolean() == true) {
- stage("Verify backend version for osd.${id}") {
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
sleep(5)
- salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
- salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
- stage('Ask for manual confirmation') {
- input message: "From the verification commands above, please check the backend version of osd.${id} and ceph status. If it is correct, Do you want to continue to migrate next osd?"
+ // stop osd daemons
+ stage('Stop OSD daemons') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
+ }
+
+ // remove keyring `ceph auth del osd.3`
+ stage('Remove OSD keyrings from auth') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+ }
+
+ // remove osd `ceph osd rm osd.3`
+ stage('Remove OSDs') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+ }
+
+ def dmcrypt = ""
+ try {
+ dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+
+ if (dmcrypt?.trim()) {
+ def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]","")
+
+ // remove partition tables
+ stage('dd part tables') {
+ salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+ }
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+ // reboot
+ stage('reboot and wait') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
+ salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+ sleep(10)
+ }
+
+ // zap disks `ceph-disk zap /dev/sdi`
+ stage('Zap devices') {
+ try {
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ }
+
+ } else {
+
+ def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]","")
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+ // umount `umount /dev/sdi1`
+ stage('Umount devices') {
+ salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ }
+
+ // zap disks `ceph-disk zap /dev/sdi`
+ stage('Zap device') {
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ }
+ }
+
+ // Deploy Ceph OSD
+ stage('Deploy Ceph OSD') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
+ salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
+ }
+
+ if (PER_OSD_CONTROL.toBoolean() == true) {
+ stage("Verify backend version for osd.${id}") {
+ sleep(5)
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ }
+
+ stage('Ask for manual confirmation') {
+                        input message: "From the verification commands above, please check the backend version of osd.${id} and the Ceph status. If they are correct, do you want to continue migrating the next OSD?"
+ }
}
}
}
- }
- if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
- stage("Verify backend versions") {
- sleep(5)
- salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
- salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
+ stage("Verify backend versions") {
+ sleep(5)
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ }
+
+ stage('Ask for manual confirmation') {
+                    input message: "From the verification commands above, please check the Ceph status and the backend version of the OSDs on this host. If they are correct, do you want to continue migrating the next OSD host?"
+ }
}
- stage('Ask for manual confirmation') {
- input message: "From the verification command above, please check the ceph status and backend version of osds on this host. If it is correct, Do you want to continue to migrate next OSD host?"
- }
}
-
}
- // remove cluster flags
- if (flags.size() > 0) {
- stage('Unset cluster flags') {
- for (flag in flags) {
- common.infoMsg('Removing flag ' + flag)
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ finally {
+ // remove cluster flags
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ common.infoMsg('Removing flag ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ }
}
}
}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index e461363..0a9d4cc 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -30,14 +30,6 @@
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- if (flags.size() > 0) {
- stage('Set cluster flags') {
- for (flag in flags) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
- }
- }
- }
-
def osd_ids = []
def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
@@ -49,167 +41,178 @@
// get list of osd disks of the host
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
+ def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
if (cephGrain['return'].isEmpty()) {
throw new Exception("Ceph salt grain cannot be found!")
}
- common.print(cephGrain)
- def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
- common.prettyPrint(ceph_disks)
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- if (osd_id in osds || OSD == '*') {
- osd_ids.add('osd.' + osd_id)
- print("Will delete " + osd_id)
- } else {
- print("Skipping " + osd_id)
- }
- }
-
- if (osd_ids == []) {
- currentBuild.result = 'SUCCESS'
- return
- }
-
- // `ceph osd out <id> <id>`
- stage('Set OSDs out') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
- }
-
- // wait for healthy cluster
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- sleep(5)
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // stop osd daemons
- stage('Stop OSD daemons') {
- for (i in osd_ids) {
- salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
- }
- }
-
- // `ceph osd crush remove osd.2`
- stage('Remove OSDs from CRUSH') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
- }
- }
-
- // remove keyring `ceph auth del osd.3`
- stage('Remove OSD keyrings from auth') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
- }
- }
-
- // remove osd `ceph osd rm osd.3`
- stage('Remove OSDs') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
- }
- }
-
- for (osd_id in osd_ids) {
- id = osd_id.replaceAll('osd.', '')
-
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- stage('Remove journal / block_db / block_wal partition') {
- def partition_uuid = ""
- def journal_partition_uuid = ""
- def block_db_partition_uuid = ""
- def block_wal_partition_uuid = ""
- try {
- journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
- try {
- block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
-
- try {
- block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
-
- // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (journal_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
- }
- if (block_db_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
- }
- if (block_wal_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
- }
-
- try {
- salt.cmdRun(pepperEnv, HOST, "partprobe")
- }
- catch (Exception e) {
- common.warningMsg(e)
+ if (flags.size() > 0) {
+ stage('Set cluster flags') {
+ for (flag in flags) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
}
}
- if (cleanDisk) {
- // remove data / block / lockbox partition `parted /dev/sdj rm 3`
- stage('Remove data / block / lockbox partition') {
- def data_partition_uuid = ""
- def block_partition_uuid = ""
- def osd_fsid = ""
- def lvm = ""
- def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ }
+
+ try {
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ if (osd_id in osds || OSD == '*') {
+ osd_ids.add('osd.' + osd_id)
+ print("Will delete " + osd_id)
+ } else {
+ print("Skipping " + osd_id)
+ }
+ }
+
+ if ( osd_ids == [] )
+ {
+ currentBuild.result = 'SUCCESS'
+ return
+ }
+
+ // `ceph osd out <id> <id>`
+ stage('Set OSDs out') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+ }
+
+ // wait for healthy cluster
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ sleep(5)
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
+ }
+
+ // stop osd daemons
+ stage('Stop OSD daemons') {
+ for (i in osd_ids) {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ }
+ }
+
+ // `ceph osd crush remove osd.2`
+ stage('Remove OSDs from CRUSH') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+ }
+ }
+
+ // remove keyring `ceph auth del osd.3`
+ stage('Remove OSD keyrings from auth') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+ }
+ }
+
+ // remove osd `ceph osd rm osd.3`
+ stage('Remove OSDs') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+ }
+ }
+
+ for (osd_id in osd_ids) {
+ id = osd_id.replaceAll('osd.', '')
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ stage('Remove journal / block_db / block_wal partition') {
+ def partition_uuid = ""
+ def journal_partition_uuid = ""
+ def block_db_partition_uuid = ""
+ def block_wal_partition_uuid = ""
try {
- osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
- if (lvm_enabled) {
- lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
- lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
- lvm["local"].each { lv, params ->
- if (params["Logical Volume Name"].contains(osd_fsid)) {
- data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
- }
- }
- }
+ journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
}
catch (Exception e) {
common.infoMsg(e)
}
+
try {
- block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
}
catch (Exception e) {
common.infoMsg(e)
}
// remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (block_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
- try{
- salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ if (journal_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
+ }
+ if (block_db_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
+ }
+ if (block_wal_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+ }
+
+ try {
+ salt.cmdRun(pepperEnv, HOST, "partprobe")
+ }
+ catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
+ if (cleanDisk) {
+ // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+ stage('Remove data / block / lockbox partition') {
+ def data_partition_uuid = ""
+ def block_partition_uuid = ""
+ def osd_fsid = ""
+ def lvm = ""
+ def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ try {
+ osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ if (lvm_enabled) {
+ lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+ lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+ lvm["local"].each { lv, params ->
+ if (params["Logical Volume Name"].contains(osd_fsid)) {
+ data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+ }
+ }
+ }
}
catch (Exception e) {
common.infoMsg(e)
}
- }
- if (data_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ try {
+ block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ if (block_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
+ try{
+ salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ }
+ if (data_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
}
}
}
}
- // remove cluster flags
- if (flags.size() > 0) {
- stage('Unset cluster flags') {
- for (flag in flags) {
- common.infoMsg('Removing flag ' + flag)
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ finally {
+ // remove cluster flags
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ common.infoMsg('Removing flag ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ }
}
}
}