Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
abf4ef5 Ceph update/upgrade - run highstate after updating each node
1535aa2 Fix partition removal in Ceph pipelines, make cleaning data partition optional
4bed4ad Add repositories refresh to ceph update pipeline
574aefb ceph update - unset cluster flags in case of error
c61f744 ceph-add-osd-upmap: verify if target host is responsive and ignore it while checking installed ceph version
b5663e6 Ceph - Add checks for host parameter
e28ebd7 update grain name for ceph OSDs
fd7586b Add a sleep after salt master update
68eb72c Add package management upgrade step
396632d [SALT] add option to update salt
2ea6b2f refactor and change exception message to be more adequate
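
Several of these changes (ceph-add-node, ceph-add-osd-upmap, ceph-remove-node, ceph-remove-osd) add the same guard before a host is touched; a minimal sketch of the check as it appears in the diff below:

    def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
    // an empty return means no minion answered for HOST, so abort early
    if (checknode['return'][0].values().isEmpty()) {
        common.errorMsg("Host not found")
        throw new InterruptedException()
    }
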
Change-Id: I9b9aa32cd71d588ee6a82eab2fa3ddb6a52329bc
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 9ec96c2..294f1ed 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -35,6 +35,12 @@
throw new InterruptedException()
}
+ def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
+ if (checknode['return'][0].values().isEmpty()) {
+ common.errorMsg("Host not found")
+ throw new InterruptedException()
+ }
+
if (HOST_TYPE.toLowerCase() != 'osd') {
// launch VMs
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 5c90c72..f873534 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -44,17 +44,21 @@
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- stage ("verify client versions")
- {
+ stage ("verification of supported features") {
+ def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
+ if (checknode['return'][0].values().isEmpty()) {
+ common.errorMsg("Host not found")
+ throw new InterruptedException()
+ }
// I@docker:swarm and I@prometheus:server - mon* nodes
- def nodes = salt.getMinions(pepperEnv, "I@ceph:common and not ( I@docker:swarm and I@prometheus:server )")
+ def nodes = salt.getMinions(pepperEnv, "I@ceph:common and not ( I@docker:swarm and I@prometheus:server ) and not " + HOST)
for ( node in nodes )
{
- def versions = salt.cmdRun(pepperEnv, node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
- versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
- if ( versions['client']['group']['release'] != 'luminous' )
+ def features = salt.cmdRun(pepperEnv, node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+ features = new groovy.json.JsonSlurperClassic().parseText(features[0][node])
+ if ( features['client']['group']['release'] != 'luminous' )
{
- throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+ throw new Exception("client installed on " + node + " does not support upmap. Update all clients to luminous or newer before using this pipeline")
}
}
}
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index a9bf720..22809c8 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -91,7 +91,7 @@
}
def target_hosts = salt.getMinions(pepperEnv, TARGET)
- def device_grain_name = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+ def device_grain_name = "ceph_disk"
for (tgt in target_hosts) {
def osd_ids = []
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index 39ed07e..771a399 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -19,6 +19,8 @@
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+def cleanDisk = CLEANDISK
+
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -38,6 +40,12 @@
throw new InterruptedException()
}
+ def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
+ if (checknode['return'][0].values().isEmpty()) {
+ common.errorMsg("Host not found")
+ throw new InterruptedException()
+ }
+
stage('Refresh_pillar') {
salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
}
@@ -73,9 +81,10 @@
salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
}
- } else if (HOST_TYPE.toLowerCase() == 'osd') {
+ }
+ else if (HOST_TYPE.toLowerCase() == 'osd') {
def osd_ids = []
- def device_grain_name = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+ def device_grain_name = "ceph_disk"
// get list of osd disks of the host
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0][device_grain_name]
@@ -126,65 +135,107 @@
}
for (osd_id in osd_ids) {
-
id = osd_id.replaceAll('osd.', '')
- def dmcrypt = ""
- try {
- dmcrypt = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
- if (dmcrypt?.trim()) {
- mount = salt.cmdRun(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]", "")
-
- // remove partition tables
- stage("dd part table on ${dev}") {
- salt.cmdRun(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
- }
-
- }
// remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
stage('Remove journal / block_db / block_wal partition') {
def partition_uuid = ""
def journal_partition_uuid = ""
def block_db_partition_uuid = ""
def block_wal_partition_uuid = ""
- try {
- journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
- journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/") + 1)
- } catch (Exception e) {
- common.infoMsg(e)
+ def ceph_version = salt.getPillar(pepperEnv, HOST, 'ceph:common:ceph_version').get('return')[0].values()[0]
+
+ if (ceph_version == "luminous") {
+ try {
+ journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch(Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch(Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch(Exception e) {
+ common.infoMsg(e)
+ }
}
- try {
- block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
- block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/") + 1)
- } catch (Exception e) {
- common.infoMsg(e)
+ else {
+ def volumes = salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm list --format=json", checkResponse=true, batch=null, output=false)
+ volumes = new groovy.json.JsonSlurperClassic().parseText(volumes['return'][0].values()[0])
+
+ block_db_partition_uuid = volumes[id][0]['tags'].get('ceph.db_uuid')
+ block_wal_partition_uuid = volumes[id][0]['tags'].get('ceph.wal_uuid')
}
- try {
- block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
- block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/") + 1)
- } catch (Exception e) {
- common.infoMsg(e)
- }
- // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
if (journal_partition_uuid?.trim()) {
- partition_uuid = journal_partition_uuid
- } else if (block_db_partition_uuid?.trim()) {
- partition_uuid = block_db_partition_uuid
+ ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
}
-
- // if disk has journal, block_db or block_wal on different disk, then remove the partition
- if (partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, partition_uuid)
+ if (block_db_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
}
if (block_wal_partition_uuid?.trim()) {
ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
}
+
+ try {
+ salt.cmdRun(pepperEnv, HOST, "partprobe")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ }
+
+ if (cleanDisk) {
+ // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+ stage('Remove data / block / lockbox partition') {
+ def data_partition_uuid = ""
+ def block_partition_uuid = ""
+ def osd_fsid = ""
+ def lvm = ""
+ def lvm_enabled = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ try {
+ osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ if (lvm_enabled) {
+ lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+ lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+ lvm["local"].each { lv, params ->
+ if (params["Logical Volume Name"].contains(osd_fsid)) {
+ data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+ }
+ }
+ } else {
+ data_partition_uuid = osd_fsid
+ }
+ } catch (Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ if (block_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
+ try {
+ salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ }
+ if (data_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
+ }
}
}
@@ -207,37 +258,18 @@
salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
}
- }
- stage('Remove salt-key') {
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
- } catch (Exception e) {
- common.warningMsg(e)
- }
- }
-
- stage('Remove keyring') {
- def keyring = ""
- def keyring_lines = ""
- try {
- keyring_lines = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
- } catch (Exception e) {
- common.warningMsg(e)
- }
- for (line in keyring_lines) {
- if (line.toLowerCase().contains(target.toLowerCase())) {
- keyring = line
- break
+ stage('Remove salt-key') {
+ try {
+ salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
+ } catch (Exception e) {
+ common.warningMsg(e)
}
- }
- if (keyring?.trim()) {
- salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
+ try {
+ salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
}
}
@@ -254,7 +286,6 @@
}
def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
- print target_hosts
// Update configs
stage('Update Ceph configs') {
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index e5d4893..e461363 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -22,6 +22,7 @@
def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')
+def cleanDisk = CLEANDISK
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -39,6 +40,12 @@
def osd_ids = []
+ def checknode = salt.runSaltProcessStep(pepperEnv, HOST, 'test.ping')
+ if (checknode['return'][0].values().isEmpty()) {
+ common.errorMsg("Host not found")
+ throw new InterruptedException()
+ }
+
// get list of osd disks of the host
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
@@ -115,18 +122,21 @@
def block_wal_partition_uuid = ""
try {
journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
- } catch (Exception e) {
+ }
+ catch (Exception e) {
common.infoMsg(e)
}
try {
block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
- } catch (Exception e) {
+ }
+ catch (Exception e) {
common.infoMsg(e)
}
try {
block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
- } catch (Exception e) {
+ }
+ catch (Exception e) {
common.infoMsg(e)
}
@@ -143,44 +153,54 @@
try {
salt.cmdRun(pepperEnv, HOST, "partprobe")
- } catch (Exception e) {
+ }
+ catch (Exception e) {
common.warningMsg(e)
}
}
-
- // remove data / block / lockbox partition `parted /dev/sdj rm 3`
- stage('Remove data / block / lockbox partition') {
- def data_partition_uuid = ""
- def block_partition_uuid = ""
- def osd_fsid = ""
- def lvm = ""
- def lvm_enabled= salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
- try {
- osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
- if (lvm_enabled) {
- lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
- lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
- lvm["local"].each { lv, params ->
- if (params["Logical Volume Name"].contains(osd_fsid)) {
- data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+ if (cleanDisk) {
+ // remove data / block / lockbox partition `parted /dev/sdj rm 3`
+ stage('Remove data / block / lockbox partition') {
+ def data_partition_uuid = ""
+ def block_partition_uuid = ""
+ def osd_fsid = ""
+ def lvm = ""
+ def lvm_enabled = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true)
+ try {
+ osd_fsid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ if (lvm_enabled) {
+ lvm = salt.runSaltCommand(pepperEnv, 'local', ['expression': HOST, 'type': 'compound'], 'cmd.run', null, "salt-call lvm.lvdisplay --output json -l quiet")['return'][0].values()[0]
+ lvm = new groovy.json.JsonSlurperClassic().parseText(lvm)
+ lvm["local"].each { lv, params ->
+ if (params["Logical Volume Name"].contains(osd_fsid)) {
+ data_partition_uuid = params["Logical Volume Name"].minus("/dev/")
+ }
}
}
}
- } catch (Exception e) {
- common.infoMsg(e)
- }
- try {
- block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
- } catch (Exception e) {
- common.infoMsg(e)
- }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ try {
+ block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
- // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
- if (block_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
- }
- if (data_partition_uuid?.trim()) {
- ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ // remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
+ if (block_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
+ try {
+ salt.cmdRun(pepperEnv, HOST, "ceph-volume lvm zap `readlink /var/lib/ceph/osd/ceph-${id}/block` --destroy")
+ }
+ catch (Exception e) {
+ common.infoMsg(e)
+ }
+ }
+ if (data_partition_uuid?.trim()) {
+ ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ }
}
}
}
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index ee3ed83..ab5706c 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -30,6 +30,8 @@
def pepperEnv = "pepperEnv"
flags = CLUSTER_FLAGS.tokenize(',')
+runHighState = RUNHIGHSTATE // no 'def' so it is visible inside the helper functions, like 'flags' above
+
def backup(master, target) {
stage("backup ${target}") {
@@ -111,15 +113,19 @@
}
// restart services
stage("Restart ${target} services on ${minion}") {
- if (target == 'osd') {
+ if(target == 'osd') {
def ceph_disks = salt.getGrain(master, minion, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
ceph_disks.each { osd, param ->
salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
- ceph.waitForHealthy(master, ADMIN_HOST, flags)
}
- } else {
+ }
+ else {
salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}.target")
- ceph.waitForHealthy(master, ADMIN_HOST, flags)
+ }
+
+ ceph.waitForHealthy(master, ADMIN_HOST, flags)
+ if(runHighState) {
+ salt.enforceHighstate(master, minion)
}
}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 9143b97..b1339ef 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -50,6 +50,7 @@
- 10 - number of nodes
- 10% - percentage of all targeted nodes
* DIST_UPGRADE_NODES Whether to run "apt-get dist-upgrade" on all nodes in cluster before deployment
+ * UPGRADE_SALTSTACK Whether to upgrade SaltStack packages on the Salt master and minions
*
* Test settings:
@@ -118,6 +119,10 @@
if (common.validInputParam('DIST_UPGRADE_NODES')) {
upgrade_nodes = "${DIST_UPGRADE_NODES}".toBoolean()
}
+def upgrade_salt = false
+if (common.validInputParam('UPGRADE_SALTSTACK')){
+ upgrade_salt = "${UPGRADE_SALTSTACK}".toBoolean()
+}
timeout(time: 12, unit: 'HOURS') {
node(slave_node) {
@@ -366,6 +371,11 @@
}
orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
+ if (upgrade_salt) {
+ debian.upgradeSaltPackages(venvPepper, 'I@salt:master')
+ debian.upgradeSaltPackages(venvPepper, 'I@salt:minion and not I@salt:master')
+ }
+
if (common.checkContains('STACK_INSTALL', 'kvm')) {
if (upgrade_nodes) {
debian.osUpgradeNode(venvPepper, 'I@salt:control', 'dist-upgrade', 30, 20, batch_size)
@@ -497,7 +507,7 @@
// Workaround for PROD-17765 issue to prevent crashes of keystone.role_present state.
// More details: https://mirantis.jira.com/browse/PROD-17765
salt.restartSaltMinion(venvPepper, "I@keystone:client ${extra_tgt}")
- salt.minionsReachable(venvPepper, 'I@salt:master', 'I@keystone:client ${extra_tgt}', null, 10, 6)
+ salt.minionsReachable(venvPepper, 'I@salt:master', "I@keystone:client ${extra_tgt}", null, 10, 6)
stage('Install OpenStack network') {
diff --git a/cloud-update.groovy b/cloud-update.groovy
index f45e4ec..b76c4b2 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -897,7 +897,7 @@
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- def device_grain_name = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+ def device_grain_name = "ceph_disk"
for (t in targetHosts) {
def osd_ids = []
// get list of osd disks of the host
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 00c16b3..dbb45d5 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -15,6 +15,7 @@
def commandKwargs
def selMinions = []
def flags = CLUSTER_FLAGS ? CLUSTER_FLAGS.tokenize(',') : []
+def runHighState = RUNHIGHSTATE
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -27,9 +28,8 @@
}
stage('Apply package upgrades on all nodes') {
-
targets.each { key, value ->
- // try {
+ salt.enforceState(pepperEnv, "I@ceph:${key}", 'linux.system.repo', true)
command = "pkg.install"
packages = value
commandKwargs = ['only_upgrade': 'true', 'force_yes': 'true']
@@ -39,28 +39,39 @@
}
}
- stage("Restart MONs and RGWs") {
+ stage('Set cluster flags') {
+ if (flags.size() > 0) {
+ for (flag in flags) {
+ salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", 'ceph osd set ' + flag)
+ }
+ }
+ }
+
+ stage("Restart MONs") {
selMinions = salt.getMinions(pepperEnv, "I@ceph:mon")
for (tgt in selMinions) {
// runSaltProcessStep 'service.restart' don't work for this services
salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
ceph.waitForHealthy(pepperEnv, tgt, flags)
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
}
selMinions = salt.getMinions(pepperEnv, "I@ceph:mgr")
for (tgt in selMinions) {
// runSaltProcessStep 'service.restart' don't work for this services
salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mgr.target")
ceph.waitForHealthy(pepperEnv, tgt, flags)
- }
- selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
- for (tgt in selMinions) {
- salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
- ceph.waitForHealthy(pepperEnv, tgt, flags)
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
}
}
stage('Restart OSDs') {
- def device_grain_name = salt.getPillar(pepperEnv,"I@ceph:osd","ceph:osd:lvm_enabled")['return'].first().containsValue(true) ? "ceph_volume" : "ceph_disk"
+ def device_grain_name = "ceph_disk"
selMinions = salt.getMinions(pepperEnv, "I@ceph:osd")
for (tgt in selMinions) {
salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
@@ -81,13 +92,33 @@
ceph.waitForHealthy(pepperEnv, tgt, flags, 0, 100)
}
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
+
salt.cmdRun(pepperEnv, tgt, 'ceph osd unset noout')
}
}
-
+ stage('Restart RGWs') {
+ selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
+ for (tgt in selMinions) {
+ salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
+ ceph.waitForHealthy(pepperEnv, tgt, flags)
+ if (runHighState) {
+ salt.enforceHighstate(pepperEnv, tgt)
+ }
+ }
+ }
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
+ if (flags.size() > 0) {
+ stage('Unset cluster flags') {
+ for (flag in flags) {
+ salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", 'ceph osd unset ' + flag)
+ }
+ }
+ }
currentBuild.result = "FAILURE"
currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
throw e
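
Every daemon class in update-ceph.groovy now goes through the same post-update sequence: restart the systemd target, wait for the cluster to report healthy, then optionally enforce highstate. A minimal sketch of that per-minion sequence, using the same pepperEnv/tgt/flags/runHighState variables as above (ceph-mon.target shown; the MGR, OSD and RGW loops follow the same shape with their own units and flag handling):

    // restart the daemon, then block until the cluster is healthy again
    salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
    ceph.waitForHealthy(pepperEnv, tgt, flags)
    // optionally re-apply the node's full Salt highstate after the restart
    if (runHighState) {
        salt.enforceHighstate(pepperEnv, tgt)
    }
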
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 6ed9829..9d3981b 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -63,6 +63,10 @@
salt.cmdRun(venvPepper, 'I@salt:master', "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
// can't use same function from pipeline lib, as at the moment of running upgrade pipeline Jenkins
// still using pipeline lib from current old mcp-version
+
+ // sleep to make sure the package update has started, otherwise the checks below would pass against the still-running old instance
+ sleep(120)
+
common.retry(20, 60) {
salt.minionsReachable(venvPepper, 'I@salt:master', '*')
def running = salt.runSaltProcessStep(venvPepper, target, 'saltutil.running', [], null, true, 5)
@@ -869,6 +873,8 @@
}
stage('Update Drivetrain') {
+ salt.enforceState(venvPepper, '*', 'linux.system.package', true, true, batchSize, false, 60, 2)
+
if (upgradeSaltStack) {
updateSaltStack('I@salt:master', '["salt-master", "salt-common", "salt-api", "salt-minion"]')