Unset flags even if the pipeline fails with an exception
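
Previously, if any stage of the OSD removal failed, the cluster flags
set at the start of the pipeline were left set. Move the 'Set cluster
flags' stage after the Ceph grain check and wrap all removal stages in
try/finally, so the flags are unset even when a stage throws. The shape
of the change, roughly:

    // set cluster flags
    try {
        // set OSDs out, stop daemons, remove OSDs from CRUSH,
        // auth and the OSD map, clean up partitions
    }
    finally {
        // unset cluster flags, even if a stage above failed
    }
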
Related-Prod: PROD-35064
Change-Id: I8e037852f93cdcb157b3ef77d2a58dd1de3bbe5b
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index e461363..0a9d4cc 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -30,14 +30,6 @@
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- if (flags.size() > 0) {
- stage('Set cluster flags') {
- for (flag in flags) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
- }
- }
- }
-
def osd_ids = []
def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
@@ -49,167 +41,178 @@
// get list of osd disks of the host
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
    if (cephGrain['return'].isEmpty()) {
        throw new Exception("Ceph salt grain cannot be found!")
    }
+    // read the ceph_disk map of the (single) minion returned by the grain query
+    def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
- common.print(cephGrain)
- def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
- common.prettyPrint(ceph_disks)
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- if (osd_id in osds || OSD == '*') {
- osd_ids.add('osd.' + osd_id)
- print("Will delete " + osd_id)
- } else {
- print("Skipping " + osd_id)
- }
- }
-
- if (osd_ids == []) {
- currentBuild.result = 'SUCCESS'
- return
- }
-
- // `ceph osd out <id> <id>`
- stage('Set OSDs out') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
- }
-
- // wait for healthy cluster
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- sleep(5)
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // stop osd daemons
- stage('Stop OSD daemons') {
- for (i in osd_ids) {
- salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
- }
- }
-
- // `ceph osd crush remove osd.2`
- stage('Remove OSDs from CRUSH') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
- }
- }
-
- // remove keyring `ceph auth del osd.3`
- stage('Remove OSD keyrings from auth') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
- }
- }
-
- // remove osd `ceph osd rm osd.3`
- stage('Remove OSDs') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
- }
- }
-
- for (osd_id in osd_ids) {
- id = osd_id.replaceAll('osd.', '')
-
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- stage('Remove journal / block_db / block_wal partition') {
- def partition_uuid = ""
- def journal_partition_uuid = ""
- def block_db_partition_uuid = ""
- def block_wal_partition_uuid = ""
- try {
- journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
- try {
- block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
-
- try {
- block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
-
- // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (journal_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
- }
- if (block_db_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
- }
- if (block_wal_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
- }
-
- try {
- salt.cmdRun(pepperEnv, HOST, "partprobe")
- }
- catch (Exception e) {
- common.warningMsg(e)
+ if (flags.size() > 0) {
+ stage('Set cluster flags') {
+ for (flag in flags) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
}
}
- if (cleanDisk) {
- // remove data / block / lockbox partition `parted /dev/sdj rm 3`
- stage('Remove data / block / lockbox partition') {
- def data_partition_uuid = ""
- def block_partition_uuid = ""
- def osd_fsid = ""
- def lvm = ""
- def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ }
+
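+    // run every removal stage inside try/finally so the flags set above are always unset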
+ try {
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ if (osd_id in osds || OSD == '*') {
+ osd_ids.add('osd.' + osd_id)
+ print("Will delete " + osd_id)
+ } else {
+ print("Skipping " + osd_id)
+ }
+ }
+
+        if (osd_ids == []) {
+ currentBuild.result = 'SUCCESS'
+ return
+ }
+
+ // `ceph osd out <id> <id>`
+ stage('Set OSDs out') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+ }
+
+ // wait for healthy cluster
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ sleep(5)
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
+ }
+
+ // stop osd daemons
+ stage('Stop OSD daemons') {
+ for (i in osd_ids) {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd\\.', '')], null, true)
+ }
+ }
+
+ // `ceph osd crush remove osd.2`
+ stage('Remove OSDs from CRUSH') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+ }
+ }
+
+ // remove keyring `ceph auth del osd.3`
+ stage('Remove OSD keyrings from auth') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+ }
+ }
+
+ // remove osd `ceph osd rm osd.3`
+ stage('Remove OSDs') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+ }
+ }
+
+ for (osd_id in osd_ids) {
+        def id = osd_id.replaceAll('osd\\.', '')
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ stage('Remove journal / block_db / block_wal partition') {
+ def partition_uuid = ""
+ def journal_partition_uuid = ""
+ def block_db_partition_uuid = ""
+ def block_wal_partition_uuid = ""
try {
- osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
- if (lvm_enabled) {
- lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
- lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
- lvm["local"].each { lv, params ->
- if (params["Logical Volume Name"].contains(osd_fsid)) {
- data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
- }
- }
- }
+ journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
}
catch (Exception e) {
common.infoMsg(e)
}
+
try {
- block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
}
catch (Exception e) {
common.infoMsg(e)
}
// remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (block_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
- try{
- salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ if (journal_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
+ }
+ if (block_db_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
+ }
+ if (block_wal_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+ }
+
+ try {
+ salt.cmdRun(pepperEnv, HOST, "partprobe")
+ }
+ catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
+ if (cleanDisk) {
+ // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+ stage('Remove data / block / lockbox partition') {
+ def data_partition_uuid = ""
+ def block_partition_uuid = ""
+ def osd_fsid = ""
+ def lvm = ""
+                // consult the pillar to see whether the OSDs on this host are LVM-based
+                def lvm_enabled = salt.getPillar(pepperEnv, "I@ceph:osd", "ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ try {
+ osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ if (lvm_enabled) {
+ lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+ lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+ lvm["local"].each { lv, params ->
+ if (params["Logical Volume Name"].contains(osd_fsid)) {
+ data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+ }
+ }
+ }
}
catch (Exception e) {
common.infoMsg(e)
}
- }
- if (data_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ try {
+ block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ if (block_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
+                    try {
+ salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ }
+ if (data_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
}
}
}
}
- // remove cluster flags
- if (flags.size() > 0) {
- stage('Unset cluster flags') {
- for (flag in flags) {
- common.infoMsg('Removing flag ' + flag)
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ finally {
+        // always unset the cluster flags, even when a removal stage failed
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ common.infoMsg('Removing flag ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ }
}
}
}