Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
f354656 Fix parameter in ceph upgrade pipeline
df8c353 Enforce new config with ceph.common instead of highstates
2683a0e Add optional step to wipe orphaned Ceph partitions
d8fdf67 Fix the OSD add and removal pipelines so they can be used for the migration to ceph-volume
7c836ba Remove the short-path condition in order to allow re-running the pipeline
e05bfbf Run highstate at the end of the upgrade
bddec29 Fix variable names
d43f4ed Add quotes around the target when calling salt-cp
e46eb69 Unset flags even if the pipeline failed with an exception
Change-Id: If5dc24833af1aab333036b78f6e088b8e2cd8166
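
A recurring pattern in this series is moving cluster-flag cleanup into a finally block, so the flags set at the start of a run are released even when a stage throws or the build is aborted. A minimal, self-contained Groovy sketch of the shape; println stands in for the salt.cmdRun('ceph osd set/unset ...') calls used by the real pipelines, and 'noout,norebalance' is just an example flag list:

    def flags = 'noout,norebalance'.tokenize(',')        // example flags, not a job parameter
    try {
        flags.each { println "ceph osd set ${it}" }
        throw new RuntimeException('simulated stage failure')
    } finally {
        // Executed on success, failure, or abort, so the flags are never left set;
        // the exception still propagates and fails the build, but only after cleanup.
        flags.each { println "ceph osd unset ${it}" }
    }
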
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index f873534..07fc662 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -28,7 +28,7 @@
def generatemapping(master,pgmap,map) {
def pg_new
def pg_old
- for (pg in pgmap) {
+ for (pg in pgmap.get('pg_stats',[])) {
pg_new = pg["up"].minus(pg["acting"])
pg_old = pg["acting"].minus(pg["up"])
for (i = 0; i < pg_new.size(); i++) {
@@ -50,15 +50,13 @@
common.errorMsg("Host not found")
throw new InterruptedException()
}
- // I@docker:swarm and I@prometheus:server - mon* nodes
- def nodes = salt.getMinions(pepperEnv, "I@ceph:common and not ( I@docker:swarm and I@prometheus:server ) and not " + HOST)
- for ( node in nodes )
- {
- def features = salt.cmdRun(pepperEnv, node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
- features = new groovy.json.JsonSlurperClassic().parseText(features[0][node])
- if ( fetures['client']['group']['release'] != 'luminous' )
- {
- throw new Exception("client installed on " + node + " does not support upmap. Update all clients to luminous or newer before using this pipeline")
+ def cmn = salt.getFirstMinion(pepperEnv, "I@ceph:mon")
+ def features = salt.cmdRun(pepperEnv, cmn, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+
+ features = new groovy.json.JsonSlurperClassic().parseText(features[0][cmn])
+ for ( group in features['client'] ) {
+ if ( group['release'] != 'luminous' ) {
+            throw new Exception("Some of the installed clients do not support upmap. Update all clients to luminous or newer before using this pipeline")
}
}
}
@@ -81,6 +79,7 @@
}
stage('Install Ceph OSD') {
+ salt.enforceState(pepperEnv, HOST, 'linux.storage')
orchestrate.installCephOsd(pepperEnv, HOST)
}
@@ -103,7 +102,6 @@
salt.enforceState(pepperEnv, '*', 'linux.network.host')
}
- def mapping = []
stage("update mappings") {
def pgmap
@@ -112,6 +110,7 @@
if (pgmap == '') {
return 1
} else {
+ def mapping = []
pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
generatemapping(pepperEnv, pgmap, mapping)
mapping.each(this.&runCephCommand)
@@ -128,10 +127,8 @@
ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
}
}
- catch (Throwable e) {
- // There was an error or exception thrown. Unset norebalance.
+ finally {
runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
- throw e
}
}
}
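
The rewritten compatibility check above queries a single mon for ceph features --format json and inspects every client feature group, instead of running the command on each ceph:common minion. A self-contained sketch of the parsing it relies on; the sample JSON is an assumed shape used only for illustration (real output varies between Ceph releases):

    import groovy.json.JsonSlurperClassic

    // Assumed shape: clients reported as a list of feature groups, each with a 'release' field.
    def sample = '{"client": [{"features": "0x3ffddff8ffacffff", "release": "luminous", "num": 12}]}'
    def features = new JsonSlurperClassic().parseText(sample)
    for (group in features['client']) {
        if (group['release'] != 'luminous') {
            throw new Exception('Some of the installed clients do not support upmap.')
        }
    }
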
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 22809c8..4ee9f17 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -92,153 +92,157 @@
def target_hosts = salt.getMinions(pepperEnv, TARGET)
def device_grain_name = "ceph_disk"
- for (tgt in target_hosts) {
- def osd_ids = []
+ try {
+ for (tgt in target_hosts) {
+ def osd_ids = []
- // get list of osd disks of the tgt
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
- def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+ // get list of osd disks of the tgt
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+ def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- if (osd_id in osds || OSD == '*') {
- osd_ids.add('osd.' + osd_id)
- print("Will migrate " + osd_id)
- } else {
- print("Skipping " + osd_id)
- }
- }
-
- for (osd_id in osd_ids) {
-
- def id = osd_id.replaceAll('osd.', '')
- def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
-
- if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
-
- // wait for healthy cluster before manipulating with osds
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // `ceph osd out <id> <id>`
- stage('Set OSDs out') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
- }
-
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- sleep(5)
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // stop osd daemons
- stage('Stop OSD daemons') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
- }
-
- // remove keyring `ceph auth del osd.3`
- stage('Remove OSD keyrings from auth') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
- }
-
- // remove osd `ceph osd rm osd.3`
- stage('Remove OSDs') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
- }
-
- def dmcrypt = ""
- try {
- dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (dmcrypt?.trim()) {
- def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
-
- // remove partition tables
- stage('dd part tables') {
- salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
- }
-
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- removeJournalOrBlockPartitions(pepperEnv, tgt, id)
-
- // reboot
- stage('reboot and wait') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
- salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
- sleep(10)
- }
-
- // zap disks `ceph-disk zap /dev/sdi`
- stage('Zap devices') {
- try {
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
- }
-
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ if (osd_id in osds || OSD == '*') {
+ osd_ids.add('osd.' + osd_id)
+ print("Will migrate " + osd_id)
} else {
+ print("Skipping " + osd_id)
+ }
+ }
- def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
+ for (osd_id in osd_ids) {
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+ def id = osd_id.replaceAll('osd.', '')
+ def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
- // umount `umount /dev/sdi1`
- stage('Umount devices') {
- salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
+
+ // wait for healthy cluster before manipulating with osds
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
- // zap disks `ceph-disk zap /dev/sdi`
- stage('Zap device') {
- salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ // `ceph osd out <id> <id>`
+ stage('Set OSDs out') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
}
- }
- // Deploy Ceph OSD
- stage('Deploy Ceph OSD') {
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
- salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
- }
-
- if (PER_OSD_CONTROL.toBoolean() == true) {
- stage("Verify backend version for osd.${id}") {
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
sleep(5)
- salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
- salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
- stage('Ask for manual confirmation') {
- input message: "From the verification commands above, please check the backend version of osd.${id} and ceph status. If it is correct, Do you want to continue to migrate next osd?"
+ // stop osd daemons
+ stage('Stop OSD daemons') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
+ }
+
+ // remove keyring `ceph auth del osd.3`
+ stage('Remove OSD keyrings from auth') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+ }
+
+ // remove osd `ceph osd rm osd.3`
+ stage('Remove OSDs') {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+ }
+
+ def dmcrypt = ""
+ try {
+ dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+
+ if (dmcrypt?.trim()) {
+ def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]","")
+
+ // remove partition tables
+ stage('dd part tables') {
+ salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+ }
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+ // reboot
+ stage('reboot and wait') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'system.reboot', null, null, true, 5)
+ salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
+ sleep(10)
+ }
+
+ // zap disks `ceph-disk zap /dev/sdi`
+ stage('Zap devices') {
+ try {
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ }
+
+ } else {
+
+ def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]","")
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ removeJournalOrBlockPartitions(pepperEnv, tgt, id)
+
+ // umount `umount /dev/sdi1`
+ stage('Umount devices') {
+ salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ }
+
+ // zap disks `ceph-disk zap /dev/sdi`
+ stage('Zap device') {
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ }
+ }
+
+ // Deploy Ceph OSD
+ stage('Deploy Ceph OSD') {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.refresh_pillar', [], null, true, 5)
+ salt.enforceState(pepperEnv, tgt, 'ceph.osd', true)
+ }
+
+ if (PER_OSD_CONTROL.toBoolean() == true) {
+ stage("Verify backend version for osd.${id}") {
+ sleep(5)
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ }
+
+ stage('Ask for manual confirmation') {
+ input message: "From the verification commands above, please check the backend version of osd.${id} and ceph status. If it is correct, Do you want to continue to migrate next osd?"
+ }
}
}
}
- }
- if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
- stage("Verify backend versions") {
- sleep(5)
- salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
- salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
+ stage("Verify backend versions") {
+ sleep(5)
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
+ }
+
+ stage('Ask for manual confirmation') {
+ input message: "From the verification command above, please check the ceph status and backend version of osds on this host. If it is correct, Do you want to continue to migrate next OSD host?"
+ }
}
- stage('Ask for manual confirmation') {
- input message: "From the verification command above, please check the ceph status and backend version of osds on this host. If it is correct, Do you want to continue to migrate next OSD host?"
- }
}
-
}
- // remove cluster flags
- if (flags.size() > 0) {
- stage('Unset cluster flags') {
- for (flag in flags) {
- common.infoMsg('Removing flag ' + flag)
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ finally {
+ // remove cluster flags
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ common.infoMsg('Removing flag ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ }
}
}
}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index e461363..405d478 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -11,6 +11,8 @@
* ADMIN_HOST Host (minion id) with admin keyring
* CLUSTER_FLAGS Comma separated list of tags to apply to cluster
* WAIT_FOR_HEALTHY Wait for cluster rebalance before stoping daemons
+ * CLEANDISK Wipe data disk of removed osd
+ *  CLEAN_ORPHANS                   Wipe partitions left over from OSDs that are no longer present
*
*/
@@ -22,7 +24,8 @@
def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')
-def cleanDisk = CLEANDISK
+def cleanDisk = CLEANDISK.toBoolean()
+def cleanOrphans = CLEAN_ORPHANS.toBoolean()
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -30,14 +33,6 @@
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- if (flags.size() > 0) {
- stage('Set cluster flags') {
- for (flag in flags) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
- }
- }
- }
-
def osd_ids = []
def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
@@ -49,167 +44,193 @@
// get list of osd disks of the host
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
+ def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
if (cephGrain['return'].isEmpty()) {
throw new Exception("Ceph salt grain cannot be found!")
}
- common.print(cephGrain)
- def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
- common.prettyPrint(ceph_disks)
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- if (osd_id in osds || OSD == '*') {
- osd_ids.add('osd.' + osd_id)
- print("Will delete " + osd_id)
- } else {
- print("Skipping " + osd_id)
- }
- }
-
- if (osd_ids == []) {
- currentBuild.result = 'SUCCESS'
- return
- }
-
- // `ceph osd out <id> <id>`
- stage('Set OSDs out') {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
- }
-
- // wait for healthy cluster
- if (WAIT_FOR_HEALTHY.toBoolean()) {
- sleep(5)
- ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
- }
-
- // stop osd daemons
- stage('Stop OSD daemons') {
- for (i in osd_ids) {
- salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
- }
- }
-
- // `ceph osd crush remove osd.2`
- stage('Remove OSDs from CRUSH') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
- }
- }
-
- // remove keyring `ceph auth del osd.3`
- stage('Remove OSD keyrings from auth') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
- }
- }
-
- // remove osd `ceph osd rm osd.3`
- stage('Remove OSDs') {
- for (i in osd_ids) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
- }
- }
-
- for (osd_id in osd_ids) {
- id = osd_id.replaceAll('osd.', '')
-
- // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
- stage('Remove journal / block_db / block_wal partition') {
- def partition_uuid = ""
- def journal_partition_uuid = ""
- def block_db_partition_uuid = ""
- def block_wal_partition_uuid = ""
- try {
- journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
- try {
- block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
-
- try {
- block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
- }
- catch (Exception e) {
- common.infoMsg(e)
- }
-
- // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (journal_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
- }
- if (block_db_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
- }
- if (block_wal_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
- }
-
- try {
- salt.cmdRun(pepperEnv, HOST, "partprobe")
- }
- catch (Exception e) {
- common.warningMsg(e)
+ if (flags.size() > 0) {
+ stage('Set cluster flags') {
+ for (flag in flags) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
}
}
- if (cleanDisk) {
- // remove data / block / lockbox partition `parted /dev/sdj rm 3`
- stage('Remove data / block / lockbox partition') {
- def data_partition_uuid = ""
- def block_partition_uuid = ""
- def osd_fsid = ""
- def lvm = ""
- def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ }
+
+ try {
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ if (osd_id in osds || OSD == '*') {
+ osd_ids.add('osd.' + osd_id)
+ print("Will delete " + osd_id)
+ } else {
+ print("Skipping " + osd_id)
+ }
+ }
+
+ // `ceph osd out <id> <id>`
+ stage('Set OSDs out') {
+ if ( !osd_ids.isEmpty() ) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+ }
+ }
+
+ // wait for healthy cluster
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ sleep(5)
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
+ }
+
+ // stop osd daemons
+ stage('Stop OSD daemons') {
+ for (i in osd_ids) {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ }
+ }
+
+ // `ceph osd crush remove osd.2`
+ stage('Remove OSDs from CRUSH') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+ }
+ }
+
+ // remove keyring `ceph auth del osd.3`
+ stage('Remove OSD keyrings from auth') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+ }
+ }
+
+ // remove osd `ceph osd rm osd.3`
+ stage('Remove OSDs') {
+ for (i in osd_ids) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+ }
+ }
+
+ for (osd_id in osd_ids) {
+ id = osd_id.replaceAll('osd.', '')
+
+ // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
+ stage('Remove journal / block_db / block_wal partition') {
+ def partition_uuid = ""
+ def journal_partition_uuid = ""
+ def block_db_partition_uuid = ""
+ def block_wal_partition_uuid = ""
try {
- osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
- if (lvm_enabled) {
- lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
- lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
- lvm["local"].each { lv, params ->
- if (params["Logical Volume Name"].contains(osd_fsid)) {
- data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
- }
- }
- }
+ journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
}
catch (Exception e) {
common.infoMsg(e)
}
+
try {
- block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
}
catch (Exception e) {
common.infoMsg(e)
}
// remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (block_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
- try{
- salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ if (journal_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
+ }
+ if (block_db_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
+ }
+ if (block_wal_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+ }
+
+ try {
+ salt.cmdRun(pepperEnv, HOST, "partprobe")
+ }
+ catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
+ if (cleanDisk) {
+ // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+ stage('Remove data / block / lockbox partition') {
+ def data_partition_uuid = ""
+ def block_partition_uuid = ""
+ def osd_fsid = ""
+ def lvm = ""
+ def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ try {
+ osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ if (lvm_enabled) {
+ lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+ lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+ lvm["local"].each { lv, params ->
+ if (params["Logical Volume Name"].contains(osd_fsid)) {
+ data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+ }
+ }
+ }
}
catch (Exception e) {
common.infoMsg(e)
}
+ try {
+ block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ if (block_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
+ try{
+ salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ }
+ if (data_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
+ else {
+ ceph.removePartition(pepperEnv, HOST, osd_fsid, 'data', id)
+ }
}
- if (data_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
+ }
+ if (cleanOrphans) {
+ stage('Remove orphan partitions') {
+ def orphans = []
+ def disks = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph-disk list --format json")['return'][0].values()[0]
+ for (disk in disks) {
+ for (partition in disk.get('partitions')) {
+ if (partition.get('type') == 'block.db' && !partition.containsKey('block.db_for')) {
+ orphans.add(partition['uuid'])
+ }
+ }
+ }
+ for (orphan in orphans) {
+ ceph.removePartition(pepperEnv, HOST, orphan)
}
}
}
}
- // remove cluster flags
- if (flags.size() > 0) {
- stage('Unset cluster flags') {
- for (flag in flags) {
- common.infoMsg('Removing flag ' + flag)
- salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ finally {
+ // remove cluster flags
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ common.infoMsg('Removing flag ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ }
}
}
}
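
The new CLEAN_ORPHANS step scans the JSON produced by ceph-disk list --format json and wipes block.db partitions that are no longer referenced by any OSD. A standalone sketch of that filter; the disks structure below is invented sample data that only mimics the fields the pipeline reads:

    // Invented sample: the second block.db partition has no 'block.db_for'
    // back-reference, so it is treated as an orphan.
    def disks = [
        [path: '/dev/sdb', partitions: [[type: 'block.db', uuid: 'aaaa-1111', 'block.db_for': '/dev/sdc1']]],
        [path: '/dev/sdd', partitions: [[type: 'block.db', uuid: 'bbbb-2222']]]
    ]
    def orphans = []
    for (disk in disks) {
        for (partition in disk.get('partitions') ?: []) {
            if (partition.get('type') == 'block.db' && !partition.containsKey('block.db_for')) {
                orphans.add(partition['uuid'])
            }
        }
    }
    assert orphans == ['bbbb-2222']
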
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index ab5706c..25b4748 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -27,11 +27,9 @@
def python = new com.mirantis.mk.Python()
ceph = new com.mirantis.mk.Ceph()
-def pepperEnv = "pepperEnv"
+pepperEnv = "pepperEnv"
flags = CLUSTER_FLAGS.tokenize(',')
-def runHighState = RUNHIGHSTATE
-
def backup(master, target) {
stage("backup ${target}") {
@@ -124,9 +122,6 @@
}
ceph.waitForHealthy(master, ADMIN_HOST, flags)
- if(runHighState) {
- salt.enforceHighstate(pepperEnv, tgt)
- }
}
stage("Verify services for ${minion}") {
@@ -246,6 +241,7 @@
if (TARGET_RELEASE == 'nautilus' ) {
salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mon enable-msgr2")
}
+ salt.enforceState(pepperEnv, "I@ceph:common", "ceph.common")
}
}
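
The switch from def pepperEnv to a bare pepperEnv = "pepperEnv" assignment above is most likely about Groovy script scoping: a def variable is local to the script's implicit run() method and is invisible inside functions such as backup(), while an undeclared assignment lands in the script binding and can be read from any method. A tiny self-contained illustration of that behaviour in plain Groovy:

    a = 'stored in the script binding'   // no 'def': visible to script methods
    def b = 'local to the script body'   // 'def': not visible inside show()

    def show() {
        println a        // resolved through the binding
        // println b     // would throw MissingPropertyException
    }
    show()
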
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index c4351b9..289d816 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -129,7 +129,7 @@
SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
if (SKIP_LIST_PATH) {
mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/var/lib/tempest/skiplists/skip.list"]
- salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
+ salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp '${TARGET_NODE}' ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
}
}
else {