ceph: extract waitForHealthy helper with bounded retries in replace-failed-osd pipeline
PROD-15484
PROD-15483
Change-Id: I1e344b49407c07599da83aa52f930882910c208d
diff --git a/ceph-replace-failed-osd.groovy b/ceph-replace-failed-osd.groovy
index ee4ef38..9127581 100644
--- a/ceph-replace-failed-osd.groovy
+++ b/ceph-replace-failed-osd.groovy
@@ -31,6 +31,19 @@
return salt.cmdRun(master, target, cmd)
}
+def waitForHealthy(master, count=0, attempts=300) {
+ // wait for healthy cluster
+ while (count<attempts) {
+ def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
+ if (health.contains('HEALTH_OK')) {
+ common.infoMsg('Cluster is healthy')
+ break;
+ }
+ count++
+ sleep(10)
+ }
+}
+
node("python") {
// create connection to salt master
@@ -70,17 +83,8 @@
// wait for healthy cluster
if (WAIT_FOR_HEALTHY.toBoolean() == true) {
- stage('Waiting for healthy cluster') {
- sleep(5)
- while (true) {
- def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- sleep(10)
- }
- }
+ sleep(5)
+ waitForHealthy(pepperEnv)
}
// stop osd daemons