Ceph update/upgrade - run highstate after updating each node
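
When RUNHIGHSTATE is set, re-apply the Salt highstate on each node right
after its Ceph services have been restarted and the cluster reports healthy
again. Per node the sequence is now roughly:

    systemctl restart ceph-<service>.target   # or ceph-osd@<id> per disk
    ceph.waitForHealthy(...)                  # block until the cluster recovers
    salt.enforceHighstate(...)                # only when RUNHIGHSTATE is set

update-ceph.groovy also gains an explicit 'Set cluster flags' stage and
restarts the RGWs in a separate stage after the OSDs.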
Related-Prod: PROD-29831
Change-Id: I7675c2d4bdb607d44df55e8de14f1f1365598c9d
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index ee3ed83..ab5706c 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -30,6 +30,10 @@
def pepperEnv = "pepperEnv"
flags = CLUSTER_FLAGS.tokenize(',')
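+// RUNHIGHSTATE is a string job parameter; keep it in the script binding
+// (no 'def') so the helper functions below can read it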
+runHighState = RUNHIGHSTATE.toBoolean()
+
def backup(master, target) {
stage("backup ${target}") {
@@ -111,15 +113,21 @@
}
// restart services
stage("Restart ${target} services on ${minion}") {
- if (target == 'osd') {
+ if(target == 'osd') {
def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
ceph_disks.each { osd, param ->
salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
- ceph.waitForHealthy(master, ADMIN_HOST, flags)
}
- } else {
+ }
+ else {
salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}.target")
- ceph.waitForHealthy(master, ADMIN_HOST, flags)
+ }
+
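+ // once the services are back, gate on cluster health, then optionally
+ // re-apply the node's full Salt state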
+ ceph.waitForHealthy(master, ADMIN_HOST, flags)
+ if(runHighState) {
+ salt.enforceHighstate(master, minion)
}
}
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 1de1012..dbb45d5 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -15,6 +15,8 @@
def commandKwargs
def selMinions = []
def flags = CLUSTER_FLAGS ? CLUSTER_FLAGS.tokenize(',') : []
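+// when true, run state.highstate on every node right after its Ceph services restart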
+def runHighState = RUNHIGHSTATE.toBoolean()
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -27,7 +28,6 @@
}
stage('Apply package upgrades on all nodes') {
-
targets.each { key, value ->
salt.enforceState(pepperEnv, "I@ceph:${key}", 'linux.system.repo', true)
command = "pkg.install"
@@ -39,23 +39,34 @@
}
}
- stage("Restart MONs and RGWs") {
+ stage('Set cluster flags') {
+ if (flags.size() > 0) {
+ for (flag in flags) {
+ salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", 'ceph osd set ' + flag)
+ }
+ }
+ }
+
+ stage("Restart MONs") {
selMinions = salt.getMinions(pepperEnv, "I@ceph:mon")
for (tgt in selMinions) {
- // runSaltProcessStep 'service.restart' don't work for this services
+ // runSaltProcessStep 'service.restart' doesn't work for these services
salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
ceph.waitForHealthy(pepperEnv, tgt, flags)
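+ // with the mon healthy again, optionally enforce this node's highstate before moving on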
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
}
selMinions = salt.getMinions(pepperEnv, "I@ceph:mgr")
for (tgt in selMinions) {
- // runSaltProcessStep 'service.restart' don't work for this services
+ // runSaltProcessStep 'service.restart' doesn't work for these services
salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mgr.target")
ceph.waitForHealthy(pepperEnv, tgt, flags)
- }
- selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
- for (tgt in selMinions) {
- salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
- ceph.waitForHealthy(pepperEnv, tgt, flags)
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
}
}
@@ -81,11 +92,26 @@
ceph.waitForHealthy(pepperEnv, tgt, flags, 0, 100)
}
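+ // enforce the highstate while noout is still set; rebalancing is re-enabled just below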
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
+
salt.cmdRun(pepperEnv, tgt, 'ceph osd unset noout')
}
}
-
+ stage('Restart RGWs') {
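+ // gateways now restart last, in their own stage, once mons, mgrs and OSDs are done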
+ selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
+ for (tgt in selMinions) {
+ salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
+ ceph.waitForHealthy(pepperEnv, tgt, flags)
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
+ }
+ }
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
if (flags.size() > 0) {