Merge "Enforce linux repo update before calling package update" into release/proposed/2019.2.0
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 9ef1f88..d1315b2 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -125,11 +125,14 @@
if (pgmap.trim()) {
pgmap = "{\"pgs\":$pgmap}" // common.parseJSON() can't parse a list of maps
pgmap = common.parseJSON(pgmap)['pgs']
- ceph.generateMapping(pgmap, mapping)
- for(map in mapping) {
- ceph.cmdRun(pepperEnv, map)
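+ // Newer Ceph releases report a 'pg_ready' flag in the pg listing; when it is set,
+ // all PGs are already placed and no manual remapping is needed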
+ if (!pgmap.get('pg_ready', false)) {
+ ceph.generateMapping(pgmap, mapping)
+ for(map in mapping) {
+ ceph.cmdRun(pepperEnv, map)
+ }
+ sleep(30)
}
- sleep(30)
}
}
}
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index 9fd38b6..42c92db 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -11,6 +11,7 @@
* FAST_WIPE Clean only the partition table instead of performing a full wipe
* CLEAN_ORPHANS Clean Ceph partitions which are no longer part of the cluster
* OSD Comma-separated list of OSDs to remove while keeping the rest intact
+ * OSD_NODE_IS_DOWN Remove an unavailable (offline) OSD node from the cluster; the node is given in the HOST parameter
* GENERATE_CRUSHMAP Generate a new crush map. Ignored when OSD is set
*
*/
@@ -30,6 +31,7 @@
def osdOnly = OSD.trim() as Boolean
def generateCrushmap = osdOnly ? false : GENERATE_CRUSHMAP.toBoolean()
+def osdNodeUnavailable = OSD_NODE_IS_DOWN.toBoolean()
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -37,6 +39,38 @@
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ if (osdNodeUnavailable) {
+ stage('Remove unavailable OSD node') {
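+ // CRUSH host buckets are named by short hostname, so strip the domain from HOST before matching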
+ osdHostName = salt.stripDomainName("${HOST}")
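+ // Walk the OSD tree and collect the IDs of all OSDs sitting under this host bucket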
+ osdTreeString = ceph.cmdRun(pepperEnv, "ceph osd tree --format json-pretty")
+ osdTree = common.parseJSON(osdTreeString)
+ osdIDs = []
+ for(osd in osdTree["nodes"]) {
+ if (osd["type"] == "host" && osd["name"] == osdHostName) {
+ osdIDs = osd["children"]
+ break
+ }
+ }
+ if (osdIDs.size() == 0) {
+ common.warningMsg("Can't find any OSDs placed on host ${HOST} (${osdHostName}). Is the host name correct?")
+ currentBuild.result = "UNSTABLE"
+ } else {
+ common.infoMsg("Found the following OSDs for host ${HOST} (${osdHostName}): ${osdIDs}")
+ input message: "Do you want to continue with node removal?"
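+ // 'ceph osd purge' removes each OSD from the CRUSH map, deletes its auth entry and drops it from the OSD map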
+ for (osdId in osdIDs) {
+ ceph.cmdRun(pepperEnv, "ceph osd purge ${osdId} --yes-i-really-mean-it", true, true)
+ }
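+ // Drop the node's key from the Salt master so the offline minion is no longer targeted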
+ salt.cmdRun(pepperEnv, "I@salt:master", "salt-key -d ${HOST} --include-all -y")
+ }
+ }
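+ // The offline node has been purged; the regular removal flow below requires a reachable minion, so stop here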
+ return
+ }
+
def target = salt.getMinions(pepperEnv, HOST)
if(target.isEmpty()) {
common.errorMsg("Host not found")
@@ -104,7 +133,6 @@
def hostname = ceph.getGrain(pepperEnv, HOST, 'host')
ceph.cmdRun(pepperEnv, 'ceph mon getmap -o monmap.backup')
ceph.cmdRun(pepperEnv, "ceph mon remove $hostname")
- salt.cmdRun(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm $hostname")
}
else {
common.infoMsg('Stage skipped.')