update pipelines for refactored grains
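
The ceph formula refactoring keeps the OSD device information under the
'ceph_disk' grain key, so the pipelines no longer need to pick between
'ceph_disk' and 'ceph_volume' based on the ceph:osd:lvm_enabled pillar.
Both ceph-remove-osd and ceph-upgrade now read the grain the same way
(sketch taken from the diff; the exact return shape depends on the Salt
API output for the targeted minion):

    def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
    ceph_disks.each { osd, param ->
        // one entry per OSD id with its device parameters
    }

Other changes:
 * ceph-remove-osd: drop the lockbox partition handling, which only
   duplicated the data partition UUID.
 * ceph-upgrade: enforce the ceph.mgr state with retries instead of
   failing on the first error.
 * ceph-upgrade: upgrade packages via pkg.install with only_upgrade=True
   instead of calling 'apt install' through cmdRun.
 * ceph-upgrade: run 'ceph mon enable-msgr2' for Nautilus after the
   whole upgrade finishes rather than right after the mon stage.
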
Related-Prod: PROD-35018
Change-Id: I1c61799b0cabfeb0fd4601fd4a50fb311038cf37
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 447a031..98c65a6 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -47,8 +47,7 @@
throw new Exception("Ceph salt grain cannot be found!")
}
common.print(cephGrain)
- def device_grain_name = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
- def ceph_disks = cephGrain['return'][0].values()[0].values()[0][device_grain_name]
+ def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
common.prettyPrint(ceph_disks)
for (i in ceph_disks) {
@@ -154,7 +153,6 @@
stage('Remove data / block / lockbox partition') {
def data_partition_uuid = ""
def block_partition_uuid = ""
- def lockbox_partition_uuid = ""
def osd_fsid = ""
def lvm = ""
def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
@@ -178,12 +176,6 @@
common.infoMsg(e)
}
- try {
- lockbox_partition_uuid = data_partition_uuid
- } catch (Exception e) {
- common.infoMsg(e)
- }
-
// remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
if (block_partition_uuid?.trim()) {
ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
@@ -191,9 +183,6 @@
if (data_partition_uuid?.trim()) {
ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
}
- if (lockbox_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
- }
}
}
// remove cluster flags
@@ -206,4 +195,4 @@
}
}
}
-}
\ No newline at end of file
+}
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86051c0..4cd20de 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -88,12 +88,12 @@
}
if (target == 'mgr') {
stage('Run ceph mgr state') {
- salt.enforceState(master, "I@ceph:mgr", "ceph.mgr", true)
+ salt.enforceState(master, "I@ceph:mgr", "ceph.mgr", true, failOnError=false, retries=3, retries_wait=10)
}
}
if (target == 'common') {
stage('Upgrade ceph-common pkgs') {
- salt.cmdRun(master, "I@ceph:${target}", "apt install ceph-${target} -y")
+ salt.runSaltProcessStep(master, "I@ceph:${target}", 'pkg.install', ["ceph-common"], 'only_upgrade=True')
}
} else {
minions = salt.getMinions(master, "I@ceph:${target}")
@@ -102,18 +102,17 @@
// upgrade pkgs
if (target == 'radosgw') {
stage('Upgrade radosgw pkgs') {
- salt.cmdRun(master, "I@ceph:${target}", "apt install ${target} -y ")
+ salt.runSaltProcessStep(master, "I@ceph:${target}", 'pkg.install', [target], 'only_upgrade=True')
}
} else {
stage("Upgrade ${target} pkgs on ${minion}") {
- salt.cmdRun(master, "${minion}", "apt install ceph-${target} -y")
+ salt.runSaltProcessStep(master, "${minion}", 'pkg.install', ["ceph-${target}"], 'only_upgrade=True')
}
}
// restart services
stage("Restart ${target} services on ${minion}") {
if (target == 'osd') {
- def device_grain_name = salt.getPillar(master,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
- def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
+ def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
ceph_disks.each { osd, param ->
salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
ceph.waitForHealthy(master, ADMIN_HOST, flags)
@@ -196,10 +195,6 @@
if (STAGE_UPGRADE_MON.toBoolean() == true) {
upgrade(pepperEnv, 'mon')
-
- if (TARGET_RELEASE == 'nautilus' ) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mon enable-msgr2")
- }
}
if (STAGE_UPGRADE_MGR.toBoolean() == true) {
@@ -243,6 +238,9 @@
} catch (Exception e) {
common.warningMsg(e)
}
+ if (TARGET_RELEASE == 'nautilus' ) {
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mon enable-msgr2")
+ }
}
}