Unset flags even if the pipeline fails with an exception
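
Wrap the whole per-host migration loop in try/finally so that the cluster
flags set at the start of the run are removed even when a stage throws.
A minimal standalone sketch of the pattern follows; the setFlag/unsetFlag
closures and the example flag list are hypothetical stand-ins for the
salt.cmdRun calls and parameters the pipeline actually uses:

    // example flags; the real list comes from the pipeline's parameters
    def flags = ['noout', 'norebalance']

    def setFlag   = { flag -> println("ceph osd set ${flag}") }    // hypothetical helper
    def unsetFlag = { flag -> println("ceph osd unset ${flag}") }  // hypothetical helper

    flags.each { setFlag(it) }
    try {
        // per-OSD / per-host migration work; any stage in this block may throw
        throw new RuntimeException('simulated migration failure')
    } finally {
        // always runs, so the cluster is never left with stale flags;
        // the original exception still propagates after the cleanup
        flags.each { unsetFlag(it) }
    }
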
Related-Prod: PROD-35064
Change-Id: I8e037852f93cdcb157b3ef77d2a58dd1de3bbe5b
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 22809c8..4ee9f17 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -92,153 +92,157 @@
def target_hosts = salt.getMinions(pepperEnv, TARGET)
def device_grain_name = "ceph_disk"
- for (tgt in target_hosts) {
- def osd_ids = []
+ try {
+ for (tgt in target_hosts) {
+ def osd_ids = []
- // get list of osd disks of the tgt
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
- def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+ // get list of osd disks of the tgt
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+ def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- if (osd_id in osds || OSD == '*') {
- osd_ids.add('osd.' + osd_id)
- print("Will migrate " + osd_id)
- } else {
- print("Skipping " + osd_id)
- }
- }
-
- for (osd_id in osd_ids) {
-
- def id = osd_id.replaceAll('osd.', '')
- def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
-
- if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
-
- // wait for healthy cluster before manipulating with osds
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // `ceph osd out <id> <id>`
- stage('Set OSDs out') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
- }
-
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- sleep(5)
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // stop osd daemons
- stage('Stop OSD daemons') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
- }
-
- // remove keyring `ceph auth del osd.3`
- stage('Remove OSD keyrings from auth') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
- }
-
- // remove osd `ceph osd rm osd.3`
- stage('Remove OSDs') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
- }
-
- def dmcrypt = ""
- try {
- dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (dmcrypt?.trim()) {
- def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
-
- // remove partition tables
- stage('dd part tables') {
- salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
- }
-
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- removeJournalOrBlockPartitions(pepperEnv, tgt, id)
-
- // reboot
- stage('reboot and wait') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
- salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
- sleep(10)
- }
-
- // zap disks `ceph-disk zap /dev/sdi`
- stage('Zap devices') {
- try {
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
- }
-
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ if (osd_id in osds || OSD == '*') {
+ osd_ids.add('osd.' + osd_id)
+ print("Will migrate " + osd_id)
} else {
+ print("Skipping " + osd_id)
+ }
+ }
- def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
+ for (osd_id in osd_ids) {
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+ def id = osd_id.replaceAll('osd.', '')
+ def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
- // umount `umount /dev/sdi1`
- stage('Umount devices') {
- salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+
+ // wait for a healthy cluster before manipulating OSDs
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
- // zap disks `ceph-disk zap /dev/sdi`
- stage('Zap device') {
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ // `ceph osd out <id> <id>`
+ stage('Set OSDs out') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
}
- }
- // Deploy Ceph OSD
- stage('Deploy Ceph OSD') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
- salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
- }
-
- if (PER_OSD_CONTROL.toBoolean() == true) {
- stage("Verify backend version for osd.${id}") {
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
sleep(5)
- salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
- salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
- stage('Ask for manual confirmation') {
- input message: "From the verification commands above, please check the backend version of osd.${id} and ceph status. If it is correct, Do you want to continue to migrate next osd?"
+ // stop osd daemons
+ stage('Stop OSD daemons') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
+ }
+
+ // remove keyring `ceph auth del osd.3`
+ stage('Remove OSD keyrings from auth') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+ }
+
+ // remove osd `ceph osd rm osd.3`
+ stage('Remove OSDs') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+ }
+
+ def dmcrypt = ""
+ try {
+ dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+
+ if (dmcrypt?.trim()) {
+ def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]","")
+
+ // remove partition tables
+ stage('dd part tables') {
+ salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+ }
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+ // reboot
+ stage('reboot and wait') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
+ salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+ sleep(10)
+ }
+
+ // zap disks `ceph-disk zap /dev/sdi`
+ stage('Zap devices') {
+ try {
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ }
+
+ } else {
+
+ def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]","")
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+ // umount `umount /dev/sdi1`
+ stage('Umount devices') {
+ salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ }
+
+ // zap disks `ceph-disk zap /dev/sdi`
+ stage('Zap device') {
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ }
+ }
+
+ // Deploy Ceph OSD
+ stage('Deploy Ceph OSD') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
+ salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
+ }
+
+ if (PER_OSD_CONTROL.toBoolean() == true) {
+ stage("Verify backend version for osd.${id}") {
+ sleep(5)
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ }
+
+ stage('Ask for manual confirmation') {
+ input message: "From the verification commands above, please check the backend version of osd.${id} and the ceph status. If they are correct, do you want to continue migrating the next OSD?"
+ }
}
}
}
- }
- if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
- stage("Verify backend versions") {
- sleep(5)
- salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
- salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
+ stage("Verify backend versions") {
+ sleep(5)
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ }
+
+ stage('Ask for manual confirmation') {
+ input message: "From the verification commands above, please check the ceph status and the backend version of the OSDs on this host. If they are correct, do you want to continue migrating the next OSD host?"
+ }
}
- stage('Ask for manual confirmation') {
- input message: "From the verification command above, please check the ceph status and backend version of osds on this host. If it is correct, Do you want to continue to migrate next OSD host?"
- }
}
-
}
- // remove cluster flags
- if (flags.size() > 0) {
- stage('Unset cluster flags') {
- for (flag in flags) {
- common.infoMsg('Removing flag ' + flag)
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ finally {
+ // remove cluster flags
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ common.infoMsg('Removing flag ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ }
}
}
}