Merge "add missing packages removal for Ceph MON and RGW nodes" into release/proposed/2019.2.0
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..c4881bc 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -16,6 +16,9 @@
* STAGE_UPGRADE_OSD Set to True if Ceph osd nodes upgrade is desired
* STAGE_UPGRADE_RGW Set to True if Ceph rgw nodes upgrade is desired
* STAGE_UPGRADE_CLIENT Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
+ * STAGE_FINALIZE Set to True if the configs recommended for TARGET_RELEASE should be applied after the upgrade is done
+ * BACKUP_ENABLED Set to True to copy the disks of Ceph VMs before the upgrade and back up Ceph directories on OSD nodes
+ * BACKUP_DIR Target directory for the backups when BACKUP_ENABLED is set
*
*/
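
For illustration, a minimal sketch of how the new parameters would typically be consumed in the script body; the local variable names and the '/root' default are assumptions, not part of this patch:

    // Assumption for illustration: BACKUP_ENABLED gates the VM disk backup and
    // BACKUP_DIR is the destination for the .qcow2.bak copies created later.
    // '/root' mirrors the backup location that was hard-coded before this change.
    def backupEnabled = BACKUP_ENABLED.toBoolean()
    def backupDir = BACKUP_DIR ?: '/root'
    if (backupEnabled) {
        common.infoMsg("Ceph VM disks will be backed up to ${backupDir}")
    }
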
@@ -71,12 +74,12 @@
waitForHealthy(master)
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
} catch (Exception e) {
common.warningMsg('Backup already exists')
}
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('Backup already exists')
}
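
The hunk above only swaps the previously hard-coded /root path for the new BACKUP_DIR parameter; the idempotence guard itself is unchanged. For reference, a minimal sketch of the resulting logic with the repeated path pulled into a local variable (the backupFile name is illustrative, not part of the patch):

    // Illustrative only: same guard as above, with the backup target built
    // from BACKUP_DIR instead of the hard-coded /root.
    def backupFile = "${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak"
    try {
        // The shell test keeps the step idempotent: if the .bak copy already
        // exists, the command exits non-zero and only a warning is logged.
        salt.cmdRun(master, "${minionProvider}", "[ ! -f ${backupFile} ] && virsh destroy ${minion_name}.${domain}")
    } catch (Exception e) {
        common.warningMsg('Backup already exists')
    }
    try {
        salt.cmdRun(master, "${minionProvider}", "[ ! -f ${backupFile} ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${backupFile}")
    } catch (Exception e) {
        common.warningMsg('Backup already exists')
    }
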
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 85b93e9..7c932d0 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -51,22 +51,35 @@
try {
def retries_wait = 20
def retries = 15
+
def elasticsearch_vip
- def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
- if(!pillar['return'].isEmpty()) {
- elasticsearch_vip = pillar['return'][0].values()[0]
+ def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
+ if(pillar) {
+ elasticsearch_vip = pillar
} else {
errorOccured = true
common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
}
- pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port'))
def elasticsearch_port
- if(!pillar['return'].isEmpty()) {
- elasticsearch_port = pillar['return'][0].values()[0]
+ if(pillar) {
+ elasticsearch_port = pillar
} else {
errorOccured = true
common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
}
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:scheme'))
+ def elasticsearch_scheme
+ if(pillar) {
+ elasticsearch_scheme = pillar
+ common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
+ } else {
+ common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
+ elasticsearch_scheme = "http"
+ }
+
common.retry(retries,retries_wait) {
common.infoMsg('Waiting for Elasticsearch to become green..')
salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")