Use common waitForHealthy function from pipeline-library
Additional improvements:
* small refactoring of common functions
* add a check for Prometheus nodes before running the prometheus state
* improve usage of the waitForHealthy function with flags (see the usage
  sketch below)
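
A minimal sketch of the intended call pattern, assuming the shared helper
accepts an optional list of expected flags (the flag value and its
semantics here are illustrative assumptions, not taken from this diff):

    def ceph = new com.mirantis.mk.Ceph()
    // flags the job has set itself; assumed to make a matching
    // HEALTH_WARN acceptable while waiting
    def expectedFlags = ['norebalance']
    ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", expectedFlags)
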
PROD-33010
Change-Id: Ic8679fb0178965c9788c12793902c9bc265a6146
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 26ed68e..a4dd8f2 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -9,85 +9,59 @@
*
*/
-common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+def ceph = new com.mirantis.mk.Ceph()
orchestrate = new com.mirantis.mk.Orchestrate()
-
-def waitForHealthy(master, count=0, attempts=100) {
- // wait for healthy cluster
- while (count<attempts) {
- def health = runCephCommand('ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- count++
- sleep(10)
- }
-}
+pepperEnv = "pepperEnv"
def runCephCommand(cmd) {
- return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+ return salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse = true, batch = null, output = false)
}
-def getpgmap(master) {
- return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+def getpgmap() {
+ return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
}
def generatemapping(master,pgmap,map) {
- def pg_new
- def pg_old
-
- for ( pg in pgmap )
- {
-
- pg_new = pg["up"].minus(pg["acting"])
- pg_old = pg["acting"].minus(pg["up"])
-
- for ( i = 0; i < pg_new.size(); i++ )
- {
- def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
- map.add(string)
+ def pg_new
+ def pg_old
+ for (pg in pgmap) {
+ pg_new = pg["up"].minus(pg["acting"])
+ pg_old = pg["acting"].minus(pg["up"])
+ for (i = 0; i < pg_new.size(); i++) {
+ def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+ map.add(string)
+ }
}
-
- }
}
-def pepperEnv = "pepperEnv"
-
timeout(time: 12, unit: 'HOURS') {
node("python") {
-
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- stage ("verify client versions")
- {
- def admin = salt.getMinions("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin")[0]
- def versions = salt.cmdRun("pepperEnv", admin, "ceph features", checkResponse=true, batch=null, output=false).values()[0]
+ stage("verify client versions") {
+ def admin = salt.getMinions(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin")[0]
+ def versions = salt.cmdRun(pepperEnv, admin, "ceph features", checkResponse = true, batch = null, output = false).values()[0]
- if ( versions[0][admin].contains('jewel') )
- {
- throw new Exception("Update all clients to luminous before using this pipeline")
- }
+ if (versions[0][admin].contains('jewel')) {
+ throw new Exception("Update all clients to luminous before using this pipeline")
+ }
}
- stage ("enable luminous compat")
- {
- runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+ stage("enable luminous compat") {
+ runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
}
- stage ("enable upmap balancer")
- {
- runCephCommand('ceph balancer on')['return'][0].values()[0]
- runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+ stage("enable upmap balancer") {
+ runCephCommand('ceph balancer on')['return'][0].values()[0]
+ runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
}
- stage ("set norebalance")
- {
- runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+ stage("set norebalance") {
+ runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
}
stage('Install Ceph OSD') {
@@ -96,42 +70,26 @@
def mapping = []
- stage ("update mappings")
- {
- def pgmap = getpgmap(pepperEnv)
- if ( pgmap == '' )
- {
- return 1
- }
- else
- {
- pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
- for(int x=1; x<=3; x++){
- pgmap = getpgmap(pepperEnv)
- if ( pgmap == '' )
- {
- return 1
- }
- else
- {
- pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
- generatemapping(pepperEnv,pgmap,mapping)
- mapping.each(this.&runCephCommand)
- }
+ stage("update mappings") {
+ def pgmap
+ for (int x = 1; x <= 3; x++) {
+ pgmap = getpgmap()
+ if (pgmap == '') {
+ return 1
+ } else {
+ pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
+ generatemapping(pepperEnv, pgmap, mapping)
+ mapping.each(this.&runCephCommand)
+ }
}
- }
-
}
- stage ("unset norebalance")
- {
- runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+ stage("unset norebalance") {
+ runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
}
- stage ("wait for healthy cluster")
- {
- waitForHealthy(pepperEnv)
+ stage("wait for healthy cluster") {
+ ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin")
}
-
}
}
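
For reference, a minimal sketch of what the shared ceph.waitForHealthy
helper is assumed to provide in place of the removed per-pipeline copy
(names, defaults and the flag handling below are assumptions, not the
pipeline-library source):

    // Assumed shape of the shared health-wait helper; not the actual
    // pipeline-library implementation.
    def waitForHealthy(master, target, flags = [], attempts = 100) {
        def salt = new com.mirantis.mk.Salt()
        def common = new com.mirantis.mk.Common()
        for (int i = 0; i < attempts; i++) {
            def health = salt.cmdRun(master, target, 'ceph health')['return'][0].values()[0]
            // HEALTH_OK passes; a HEALTH_WARN is assumed acceptable when it is
            // caused only by flags the pipeline set itself (e.g. norebalance)
            if (health.contains('HEALTH_OK') ||
                (health.contains('HEALTH_WARN') && flags && flags.every { health.contains(it) })) {
                common.infoMsg('Cluster is healthy')
                return
            }
            sleep(10)
        }
        throw new Exception('Ceph cluster did not reach a healthy state in time')
    }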