Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
a766d43 Reconnect to new slave after docker.client state
f5c911b Allow using tags as MCP_VERSION
4cc84ad Keep /etc/maas permissions as well
a172522 Prepare deploy-update-opencontrail4 for automation
6dbdbeb Sync stacklight-upgrade pipeline with master
373e8a3 Add CLUSTER_FLAGS parameter to ceph jobs
e9fc059 Add service client states to common openstack control class
bfd9078 Hide password from cvp-func test output
f32758b [CVP-Sanity] Override nested params of global config in Jenkins
76bea2c Add restart of the td-agent to upgrade pipeline
2325dcb Use common waitForHealthy function from pipeline-library
579f184 Remove wrong Gerrit plugin before service upgrade to 2019.2.6
9b7f097 Drop installInfra step on cicd stage
b97ba47 Fix Dogtag post-restore steps in pipeline
9056c62 Add check for Dogtag presence before starting backup
Change-Id: Ide37334756ebd42bbe034b8d11801cfa20e9dfc8
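
For reference: the recurring change across the Ceph pipelines in this merge (2325dcb, 373e8a3) drops each pipeline's local runCephCommand/waitForHealthy helpers in favour of salt.cmdRun and the shared com.mirantis.mk.Ceph class from pipeline-library. The Groovy sketch below only illustrates the resulting call pattern: the waitForHealthy signature is inferred from the calls in the diff, and ADMIN_HOST / CLUSTER_FLAGS are job parameters of those pipelines, so treat it as a sketch rather than the library's documented API.

    def salt = new com.mirantis.mk.Salt()
    def ceph = new com.mirantis.mk.Ceph()   // shared helpers from pipeline-library
    def pepperEnv = "pepperEnv"
    // CLUSTER_FLAGS may be empty; tokenize it into the list of flags to tolerate
    def flags = CLUSTER_FLAGS ? CLUSTER_FLAGS.tokenize(',') : []

    // plain Ceph CLI calls now go through salt.cmdRun against the admin target
    salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set noout')

    // the shared health wait replaces the per-pipeline polling loop; judging by the
    // removed local helpers, it waits for HEALTH_OK and treats the passed flags
    // ('<flag> flag(s) set') as an acceptable state
    ceph.waitForHealthy(pepperEnv, ADMIN_HOST, flags)
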
diff --git a/backupninja-backup-pipeline.groovy b/backupninja-backup-pipeline.groovy
index 74f38e6..028ccbe 100644
--- a/backupninja-backup-pipeline.groovy
+++ b/backupninja-backup-pipeline.groovy
@@ -60,16 +60,22 @@
}
}
if (backupDogtag) {
- try {
- def dogtagPillar = salt.getPillar(pepperEnv, "I@dogtag:server", "dogtag:server").get('return')[0].values()[0]
- if (dogtagPillar.isEmpty()) {
- throw new Exception("Problem with dogtag pillar on I@dogtag:server node.")
+ def barbicanBackendPresent = salt.getPillar(pepperEnv, "I@salt:master", "_param:barbican_backend").get('return')[0].values()[0]
+ if (barbicanBackendPresent == 'dogtag') {
+ try {
+ def dogtagPillar = salt.getPillar(pepperEnv, "I@dogtag:server", "dogtag:server").get('return')[0].values()[0]
+ if (dogtagPillar.isEmpty()) {
+ throw new Exception("Problem with dogtag pillar on I@dogtag:server node.")
+ }
}
- }
- catch (Exception e) {
- common.errorMsg(e.getMessage())
- common.errorMsg("Looks like dogtag pillar is not defined. Fix your pillar or disable dogtag backup by setting the BACKUP_DOGTAG parameter to False if you're using different barbican backend.")
- throw e
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg("Looks like dogtag pillar is not defined. Fix your pillar or disable dogtag backup by setting the BACKUP_DOGTAG parameter to False if you're using different barbican backend.")
+ throw e
+ }
+ } else {
+ backupDogtag = false
+ common.warningMsg('Backup for Dogtag is enabled, but service itself is not present. Skipping...')
}
}
}
@@ -152,7 +158,9 @@
def maasNodes = salt.getMinions(pepperEnv, 'I@maas:region')
if (!maasNodes.isEmpty()) {
common.infoMsg("Trying to save maas file permissions on ${maasNodes} if possible")
- salt.cmdRun(pepperEnv, 'I@maas:region', 'which getfacl && getfacl -pR /var/lib/maas/ > /var/lib/maas/file_permissions.txt || true')
+ salt.cmdRun(pepperEnv, 'I@maas:region', 'which getfacl && ' +
+ 'getfacl -pR /var/lib/maas/ > /var/lib/maas/file_permissions.txt &&' +
+ 'getfacl -pR /etc/maas/ > /etc/maas/file_permissions.txt || true')
}
}
if (backupDogtag) {
diff --git a/backupninja-restore-pipeline.groovy b/backupninja-restore-pipeline.groovy
index f617f1b..32f3962 100644
--- a/backupninja-restore-pipeline.groovy
+++ b/backupninja-restore-pipeline.groovy
@@ -8,10 +8,6 @@
timeout(time: 12, unit: 'HOURS') {
node() {
- if (restoreDogtag) {
- common.warningMsg("Dogtag restore does not work and disabled by default. For more information check the docs https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/backup-restore.html")
- }
- restoreDogtag = false
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
@@ -50,16 +46,26 @@
common.warningMsg("No MaaS Pillar was found. You can ignore this if it's expected. Otherwise you should fix you pillar. Check: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/maas-postgresql/backupninja-postgresql-restore.html")
}
if (restoreDogtag) {
- try {
- def dogtagPillar = salt.getPillar(pepperEnv, "I@dogtag:server:role:master", 'dogtag:server:initial_data').get('return')[0].values()[0]
- if (dogtagPillar.isEmpty()) {
- throw new Exception("Problem with Dogtag pillar on 'I@dogtag:server:role:master' node.")
+ def barbicanBackendPresent = salt.getPillar(pepperEnv, "I@salt:master", "_param:barbican_backend").get('return')[0].values()[0]
+ if (barbicanBackendPresent == 'dogtag') {
+ try {
+ def dogtagPillar = salt.getPillar(pepperEnv, "I@dogtag:server:role:master", 'dogtag:server:initial_data').get('return')[0].values()[0]
+ if (dogtagPillar.isEmpty()) {
+ throw new Exception("Problem with Dogtag pillar on 'I@dogtag:server:role:master' node.")
+ }
+ def mineCertPresent = salt.runSaltProcessStep(pepperEnv, "I@dogtag:server:role:master", 'mine.get', ['*', 'dogtag_admin_cert'], null, false).get('return')[0].values()[0]
+ if (mineCertPresent.isEmpty()) {
+ throw new Exception("Problem with Dogtag Admin cert mine data on 'I@dogtag:server:role:master' node.")
+ }
}
- }
- catch (Exception e) {
- common.errorMsg(e.getMessage())
- common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/dogtag/restore-dogtag.html')
- throw e
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg('Please fix your pillar or missed mine data. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/dogtag/2.6-and-newer/restore-dogtag.html')
+ throw e
+ }
+ } else {
+ restoreDogtag = false
+ common.warningMsg('Restore for Dogtag is enabled, but service itself is not present. Skipping...')
}
}
}
@@ -90,13 +96,11 @@
common.infoMsg("No more steps for Salt Master and MaaS restore are required.")
}
if (restoreDogtag) {
- salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': ['salt', 'reclass']])
salt.enforceState(['saltId': pepperEnv, 'target': 'I@dogtag:server:role:master', 'state': 'dogtag.server'])
salt.enforceState(['saltId': pepperEnv, 'target': 'I@dogtag:server', 'state': 'dogtag.server'])
- salt.enforceState(['saltId': pepperEnv, 'target': 'I@haproxy:proxy', 'state': 'haproxy'])
salt.enforceState(['saltId': pepperEnv, 'target': 'I@barbican:server:role:primary', 'state': 'barbican.server'])
salt.enforceState(['saltId': pepperEnv, 'target': 'I@barbican:server', 'state': 'barbican.server'])
- salt.cmdRun(pepperEnv, 'I@barbican:server', 'rm /etc/barbican/alias/*')
+ salt.cmdRun(pepperEnv, 'I@barbican:server', 'rm -rf /etc/barbican/alias')
salt.runSaltProcessStep(pepperEnv, 'I@barbican:server', 'service.restart', 'apache2')
}
}
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 33a5a67..9ec96c2 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -75,14 +75,18 @@
}
stage("Update/Install monitoring") {
- //Collect Grains
- salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
- salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
- salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
- sleep(5)
-
- salt.enforceState(pepperEnv, HOST, 'prometheus')
- salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+ def prometheusNodes = salt.getMinions(pepperEnv, 'I@prometheus:server')
+ if (!prometheusNodes.isEmpty()) {
+ //Collect Grains
+ salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
+ sleep(5)
+ salt.enforceState(pepperEnv, HOST, 'prometheus')
+ salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+ } else {
+ common.infoMsg('No Prometheus nodes in cluster. Nothing to do')
+ }
}
}
}
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 26ed68e..c193d39 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -9,85 +9,60 @@
*
*/
-common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+def ceph = new com.mirantis.mk.Ceph()
orchestrate = new com.mirantis.mk.Orchestrate()
-
-def waitForHealthy(master, count=0, attempts=100) {
- // wait for healthy cluster
- while (count<attempts) {
- def health = runCephCommand('ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- count++
- sleep(10)
- }
-}
+pepperEnv = "pepperEnv"
+def flags = CLUSTER_FLAGS ? CLUSTER_FLAGS.tokenize(',') : []
def runCephCommand(cmd) {
- return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+ return salt.cmdRun(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse = true, batch = null, output = false)
}
-def getpgmap(master) {
- return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+def getpgmap() {
+ return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
}
def generatemapping(master,pgmap,map) {
- def pg_new
- def pg_old
-
- for ( pg in pgmap )
- {
-
- pg_new = pg["up"].minus(pg["acting"])
- pg_old = pg["acting"].minus(pg["up"])
-
- for ( i = 0; i < pg_new.size(); i++ )
- {
- def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
- map.add(string)
+ def pg_new
+ def pg_old
+ for (pg in pgmap) {
+ pg_new = pg["up"].minus(pg["acting"])
+ pg_old = pg["acting"].minus(pg["up"])
+ for (i = 0; i < pg_new.size(); i++) {
+ def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+ map.add(string)
+ }
}
-
- }
}
-def pepperEnv = "pepperEnv"
-
timeout(time: 12, unit: 'HOURS') {
node("python") {
-
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- stage ("verify client versions")
- {
- def admin = salt.getMinions("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin")[0]
- def versions = salt.cmdRun("pepperEnv", admin, "ceph features", checkResponse=true, batch=null, output=false).values()[0]
+ stage("verify client versions") {
+ def admin = salt.getMinions(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin")[0]
+ def versions = salt.cmdRun(pepperEnv, admin, "ceph features", checkResponse = true, batch = null, output = false).values()[0]
- if ( versions[0][admin].contains('jewel') )
- {
- throw new Exception("Update all clients to luminous before using this pipeline")
- }
+ if (versions[0][admin].contains('jewel')) {
+ throw new Exception("Update all clients to luminous before using this pipeline")
+ }
}
- stage ("enable luminous compat")
- {
- runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+ stage("enable luminous compat") {
+ runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
}
- stage ("enable upmap balancer")
- {
- runCephCommand('ceph balancer on')['return'][0].values()[0]
- runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+ stage("enable upmap balancer") {
+ runCephCommand('ceph balancer on')['return'][0].values()[0]
+ runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
}
- stage ("set norebalance")
- {
- runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+ stage("set norebalance") {
+ runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
}
stage('Install Ceph OSD') {
@@ -96,42 +71,26 @@
def mapping = []
- stage ("update mappings")
- {
- def pgmap = getpgmap(pepperEnv)
- if ( pgmap == '' )
- {
- return 1
- }
- else
- {
- pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
- for(int x=1; x<=3; x++){
- pgmap = getpgmap(pepperEnv)
- if ( pgmap == '' )
- {
- return 1
- }
- else
- {
- pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
- generatemapping(pepperEnv,pgmap,mapping)
- mapping.each(this.&runCephCommand)
- }
+ stage("update mappings") {
+ def pgmap
+ for (int x = 1; x <= 3; x++) {
+ pgmap = getpgmap()
+ if (pgmap == '') {
+ return 1
+ } else {
+ pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmap)
+ generatemapping(pepperEnv, pgmap, mapping)
+ mapping.each(this.&runCephCommand)
+ }
}
- }
-
}
- stage ("unset norebalance")
- {
- runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+ stage("unset norebalance") {
+ runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
}
- stage ("wait for healthy cluster")
- {
- waitForHealthy(pepperEnv)
+ stage("wait for healthy cluster") {
+ ceph.waitForHealthy(pepperEnv, "I@ceph:mon and I@ceph:common:keyring:admin", flags)
}
-
}
}
diff --git a/ceph-backend-migration.groovy b/ceph-backend-migration.groovy
index 7a5821d..676c236 100644
--- a/ceph-backend-migration.groovy
+++ b/ceph-backend-migration.groovy
@@ -20,6 +20,7 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+ceph = new com.mirantis.mk.Ceph()
MIGRATION_METHOD = "per-osd"
// TBD: per-host
@@ -28,27 +29,7 @@
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')
-def removePartition(master, target, partition_uuid) {
- def partition = ""
- try {
- // partition = /dev/sdi2
- partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (partition?.trim()) {
- // dev = /dev/sdi
- def dev = partition.replaceAll('\\d+$', "")
- // part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
- runCephCommand(master, target, "parted ${dev} rm ${part_id}")
- }
- return
-}
-
def removeJournalOrBlockPartitions(master, target, id) {
-
// remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
stage('Remove journal / block_db / block_wal partition') {
def partition_uuid = ""
@@ -56,20 +37,20 @@
def block_db_partition_uuid = ""
def block_wal_partition_uuid = ""
try {
- journal_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+ journal_partition_uuid = salt.cmdRun(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_db_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+ block_db_partition_uuid = salt.cmdRun(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_wal_partition_uuid = runCephCommand(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+ block_wal_partition_uuid = salt.cmdRun(master, target, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
} catch (Exception e) {
common.infoMsg(e)
@@ -84,31 +65,15 @@
// if disk has journal, block_db or block_wal on different disk, then remove the partition
if (partition_uuid?.trim()) {
- removePartition(master, target, partition_uuid)
+ ceph.removePartition(master, target, partition_uuid)
}
if (block_wal_partition_uuid?.trim()) {
- removePartition(master, target, block_wal_partition_uuid)
+ ceph.removePartition(master, target, block_wal_partition_uuid)
}
}
return
}
-def runCephCommand(master, target, cmd) {
- return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
- // wait for healthy cluster
- while (count<attempts) {
- def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- count++
- sleep(10)
- }
-}
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -120,7 +85,7 @@
if (flags.size() > 0) {
stage('Set cluster flags') {
for (flag in flags) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
}
}
}
@@ -147,23 +112,23 @@
for (osd_id in osd_ids) {
def id = osd_id.replaceAll('osd.', '')
- def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
+ def backend = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
if (backend.contains(ORIGIN_BACKEND.toLowerCase())) {
// wait for healthy cluster before manipulating with osds
- if (WAIT_FOR_HEALTHY.toBoolean() == true) {
- waitForHealthy(pepperEnv)
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
// `ceph osd out <id> <id>`
stage('Set OSDs out') {
- runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
}
- if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
sleep(5)
- waitForHealthy(pepperEnv)
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
// stop osd daemons
@@ -173,28 +138,28 @@
// remove keyring `ceph auth del osd.3`
stage('Remove OSD keyrings from auth') {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
}
// remove osd `ceph osd rm osd.3`
stage('Remove OSDs') {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
}
def dmcrypt = ""
try {
- dmcrypt = runCephCommand(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+ dmcrypt = salt.cmdRun(pepperEnv, tgt, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
} catch (Exception e) {
common.warningMsg(e)
}
if (dmcrypt?.trim()) {
- def mount = runCephCommand(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+ def mount = salt.cmdRun(pepperEnv, tgt, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
dev = mount.split()[0].replaceAll("[0-9]","")
// remove partition tables
stage('dd part tables') {
- runCephCommand(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+ salt.cmdRun(pepperEnv, tgt, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
}
// remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
@@ -210,16 +175,16 @@
// zap disks `ceph-disk zap /dev/sdi`
stage('Zap devices') {
try {
- runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
} catch (Exception e) {
common.warningMsg(e)
}
- runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
}
} else {
- def mount = runCephCommand(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
+ def mount = salt.cmdRun(pepperEnv, tgt, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
dev = mount.split()[0].replaceAll("[0-9]","")
// remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
@@ -227,12 +192,12 @@
// umount `umount /dev/sdi1`
stage('Umount devices') {
- runCephCommand(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
+ salt.cmdRun(pepperEnv, tgt, "umount /var/lib/ceph/osd/ceph-${id}")
}
// zap disks `ceph-disk zap /dev/sdi`
stage('Zap device') {
- runCephCommand(pepperEnv, tgt, 'ceph-disk zap ' + dev)
+ salt.cmdRun(pepperEnv, tgt, 'ceph-disk zap ' + dev)
}
}
@@ -245,8 +210,8 @@
if (PER_OSD_CONTROL.toBoolean() == true) {
stage("Verify backend version for osd.${id}") {
sleep(5)
- runCephCommand(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
- runCephCommand(pepperEnv, tgt, "ceph -s")
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata ${id} | grep osd_objectstore")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
}
stage('Ask for manual confirmation') {
@@ -258,8 +223,8 @@
if (PER_OSD_HOST_CONTROL.toBoolean() == true) {
stage("Verify backend versions") {
sleep(5)
- runCephCommand(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
- runCephCommand(pepperEnv, tgt, "ceph -s")
+ salt.cmdRun(pepperEnv, tgt, "ceph osd metadata | grep osd_objectstore -B2")
+ salt.cmdRun(pepperEnv, tgt, "ceph -s")
}
stage('Ask for manual confirmation') {
@@ -273,7 +238,7 @@
stage('Unset cluster flags') {
for (flag in flags) {
common.infoMsg('Removing flag ' + flag)
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
}
}
}
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index 766dda1..e1d6ce8 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -13,48 +13,12 @@
*
*/
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
-orchestrate = new com.mirantis.mk.Orchestrate()
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
def python = new com.mirantis.mk.Python()
-
def pepperEnv = "pepperEnv"
-def removePartition(master, target, partition_uuid) {
- def partition = ""
- try {
- // partition = /dev/sdi2
- partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (partition?.trim()) {
- // dev = /dev/sdi
- def dev = partition.replaceAll('\\d+$', "")
- // part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
- runCephCommand(master, target, "parted ${dev} rm ${part_id}")
- }
- return
-}
-
-def runCephCommand(master, target, cmd) {
- return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
- // wait for healthy cluster
- while (count<attempts) {
- def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- count++
- sleep(10)
- }
-}
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -124,40 +88,40 @@
// `ceph osd out <id> <id>`
stage('Set OSDs out') {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
}
// wait for healthy cluster
- if (WAIT_FOR_HEALTHY.toBoolean() == true) {
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
sleep(5)
- waitForHealthy(pepperEnv)
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
// stop osd daemons
stage('Stop OSD daemons') {
for (i in osd_ids) {
- salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
}
}
// `ceph osd crush remove osd.2`
stage('Remove OSDs from CRUSH') {
for (i in osd_ids) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
}
}
// remove keyring `ceph auth del osd.3`
stage('Remove OSD keyrings from auth') {
for (i in osd_ids) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
}
}
// remove osd `ceph osd rm osd.3`
stage('Remove OSDs') {
for (i in osd_ids) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
}
}
@@ -166,18 +130,18 @@
id = osd_id.replaceAll('osd.', '')
def dmcrypt = ""
try {
- dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
+ dmcrypt = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
} catch (Exception e) {
common.warningMsg(e)
}
if (dmcrypt?.trim()) {
- mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
+ mount = salt.cmdRun(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
+ dev = mount.split()[0].replaceAll("[0-9]", "")
// remove partition tables
stage("dd part table on ${dev}") {
- runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
+ salt.cmdRun(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
}
}
@@ -188,21 +152,21 @@
def block_db_partition_uuid = ""
def block_wal_partition_uuid = ""
try {
- journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
- journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+ journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
+ journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/") + 1)
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
- block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
+ block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
+ block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/") + 1)
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
- block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
+ block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
+ block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/") + 1)
} catch (Exception e) {
common.infoMsg(e)
}
@@ -216,10 +180,10 @@
// if disk has journal, block_db or block_wal on different disk, then remove the partition
if (partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, partition_uuid)
+ ceph.removePartition(pepperEnv, HOST, partition_uuid)
}
if (block_wal_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+ ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
}
}
}
@@ -230,9 +194,9 @@
}
stage('Remove OSD host from crushmap') {
- def hostname = runCephCommand(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
+ def hostname = salt.cmdRun(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
try {
- runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
} catch (Exception e) {
common.warningMsg(e)
}
@@ -262,7 +226,7 @@
def keyring = ""
def keyring_lines = ""
try {
- keyring_lines = runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
+ keyring_lines = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
} catch (Exception e) {
common.warningMsg(e)
}
@@ -273,20 +237,20 @@
}
}
if (keyring?.trim()) {
- runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
}
}
if (HOST_TYPE.toLowerCase() == 'mon') {
// Update Monmap
stage('Update monmap') {
- runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
+ salt.cmdRun(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
try {
- runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
+ salt.cmdRun(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
} catch (Exception e) {
common.warningMsg(e)
}
- runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
+ salt.cmdRun(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
}
def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
@@ -305,7 +269,7 @@
}
def crushmap_target = salt.getMinions(pepperEnv, "I@ceph:setup:crush")
- if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true && crushmap_target ) {
+ if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true && crushmap_target) {
stage('Generate CRUSHMAP') {
salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 098fb98..e643017 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -14,84 +14,15 @@
*
*/
-common = new com.mirantis.mk.Common()
-salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')
-
-def removePartition(master, target, partition_uuid, type='', id=-1) {
- def partition = ""
- if (type == 'lockbox') {
- try {
- // umount - partition = /dev/sdi2
- partition = runCephCommand(master, target, "lsblk -rp | grep -v mapper | grep ${partition_uuid} ")['return'][0].values()[0].split()[0]
- runCephCommand(master, target, "umount ${partition}")
- } catch (Exception e) {
- common.warningMsg(e)
- }
- } else if (type == 'data') {
- try {
- // umount - partition = /dev/sdi2
- partition = runCephCommand(master, target, "df | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0].split()[0]
- runCephCommand(master, target, "umount ${partition}")
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- // partition = /dev/sdi2
- partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
- } else {
- try {
- // partition = /dev/sdi2
- partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split(":")[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
- }
- if (partition?.trim()) {
- if (partition.contains("nvme")) {
- // dev = /dev/nvme1n1p1
- def dev = partition.replaceAll('\\d+$', "")
- print("Skipping " + dev)
- // part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("p")+1).replaceAll("[^0-9]+", "")
- print("Skipping" + part_id)
- runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
- }
- else {
- // dev = /dev/sdi
- def dev = partition.replaceAll('\\d+$', "")
- // part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
- runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
- }
- }
- return
-}
-
-def runCephCommand(master, target, cmd) {
- return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, count=0, attempts=300) {
- // wait for healthy cluster
- while (count<attempts) {
- def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- count++
- sleep(10)
- }
-}
timeout(time: 12, unit: 'HOURS') {
node("python") {
@@ -101,7 +32,7 @@
if (flags.size() > 0) {
stage('Set cluster flags') {
for (flag in flags) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
}
}
}
@@ -112,7 +43,7 @@
salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')
- if(cephGrain['return'].isEmpty()){
+ if (cephGrain['return'].isEmpty()) {
throw new Exception("Ceph salt grain cannot be found!")
}
common.print(cephGrain)
@@ -129,78 +60,52 @@
}
}
- // wait for healthy cluster
- // if (WAIT_FOR_HEALTHY.toBoolean()) {
- // waitForHealthy(pepperEnv)
- // }
-
- if ( osd_ids == [] )
- {
- currentBuild.result = 'SUCCESS'
- return
+ if (osd_ids == []) {
+ currentBuild.result = 'SUCCESS'
+ return
}
// `ceph osd out <id> <id>`
stage('Set OSDs out') {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
}
// wait for healthy cluster
if (WAIT_FOR_HEALTHY.toBoolean()) {
sleep(5)
- waitForHealthy(pepperEnv)
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST)
}
// stop osd daemons
stage('Stop OSD daemons') {
for (i in osd_ids) {
- salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
}
}
// `ceph osd crush remove osd.2`
stage('Remove OSDs from CRUSH') {
for (i in osd_ids) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
}
}
// remove keyring `ceph auth del osd.3`
stage('Remove OSD keyrings from auth') {
for (i in osd_ids) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
}
}
// remove osd `ceph osd rm osd.3`
stage('Remove OSDs') {
for (i in osd_ids) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
}
}
for (osd_id in osd_ids) {
id = osd_id.replaceAll('osd.', '')
- /*
-
- def dmcrypt = ""
- try {
- dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (dmcrypt?.trim()) {
- mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
- dev = mount.split()[0].replaceAll("[0-9]","")
-
- // remove partition tables
- stage("dd part table on ${dev}") {
- runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
- }
-
- }
- */
// remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
stage('Remove journal / block_db / block_wal partition') {
@@ -209,35 +114,35 @@
def block_db_partition_uuid = ""
def block_wal_partition_uuid = ""
try {
- journal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
+ journal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/journal_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
+ block_db_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.db_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
+ block_wal_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block.wal_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
// remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
if (journal_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, journal_partition_uuid)
+ ceph.removePartition(pepperEnv, HOST, journal_partition_uuid)
}
if (block_db_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, block_db_partition_uuid)
+ ceph.removePartition(pepperEnv, HOST, block_db_partition_uuid)
}
if (block_wal_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, block_wal_partition_uuid)
+ ceph.removePartition(pepperEnv, HOST, block_wal_partition_uuid)
}
try {
- runCephCommand(pepperEnv, HOST, "partprobe")
+ salt.cmdRun(pepperEnv, HOST, "partprobe")
} catch (Exception e) {
common.warningMsg(e)
}
@@ -249,13 +154,13 @@
def block_partition_uuid = ""
def lockbox_partition_uuid = ""
try {
- data_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
+ data_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/fsid")['return'][0].values()[0].split("\n")[0]
common.print(data_partition_uuid)
} catch (Exception e) {
common.infoMsg(e)
}
try {
- block_partition_uuid = runCephCommand(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
+ block_partition_uuid = salt.cmdRun(pepperEnv, HOST, "cat /var/lib/ceph/osd/ceph-${id}/block_uuid")['return'][0].values()[0].split("\n")[0]
} catch (Exception e) {
common.infoMsg(e)
}
@@ -268,13 +173,13 @@
// remove partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
if (block_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, block_partition_uuid)
+ ceph.removePartition(pepperEnv, HOST, block_partition_uuid)
}
if (data_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
+ ceph.removePartition(pepperEnv, HOST, data_partition_uuid, 'data', id)
}
if (lockbox_partition_uuid?.trim()) {
- removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
+ ceph.removePartition(pepperEnv, HOST, lockbox_partition_uuid, 'lockbox')
}
}
}
@@ -283,7 +188,7 @@
stage('Unset cluster flags') {
for (flag in flags) {
common.infoMsg('Removing flag ' + flag)
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
}
}
}
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index dd75875..297feaf 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -25,41 +25,18 @@
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+ceph = new com.mirantis.mk.Ceph()
def pepperEnv = "pepperEnv"
-def flags = CLUSTER_FLAGS.tokenize(',')
+flags = CLUSTER_FLAGS.tokenize(',')
-def runCephCommand(master, target, cmd) {
- return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, flags, count=0, attempts=300) {
- // wait for healthy cluster
- while (count<attempts) {
- def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK')) {
- common.infoMsg('Cluster is healthy')
- break;
- } else {
- for (flag in flags) {
- if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
- common.infoMsg('Cluster is healthy')
- return;
- }
- }
- }
- count++
- sleep(10)
- }
-}
-
-def backup(master, flags, target) {
+def backup(master, target) {
stage("backup ${target}") {
if (target == 'osd') {
try {
salt.enforceState(master, "I@ceph:${target}", "ceph.backup", true)
- runCephCommand(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
+ salt.cmdRun(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
} catch (Exception e) {
common.errorMsg(e)
common.errorMsg("Make sure Ceph backup on OSD nodes is enabled")
@@ -79,7 +56,7 @@
def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
def minionProvider = provider_pillar['return'][0].values()[0]
- waitForHealthy(master, flags)
+ ceph.waitForHealthy(master, ADMIN_HOST, flags)
try {
salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
} catch (Exception e) {
@@ -96,14 +73,14 @@
common.warningMsg(e)
}
salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
- waitForHealthy(master, flags)
+ ceph.waitForHealthy(master, ADMIN_HOST, flags)
}
}
}
return
}
-def upgrade(master, target, flags) {
+def upgrade(master, target) {
stage("Change ${target} repos") {
salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
@@ -116,7 +93,7 @@
}
if (target == 'common') {
stage('Upgrade ceph-common pkgs') {
- runCephCommand(master, "I@ceph:${target}", "apt install ceph-${target} -y")
+ salt.cmdRun(master, "I@ceph:${target}", "apt install ceph-${target} -y")
}
} else {
minions = salt.getMinions(master, "I@ceph:${target}")
@@ -125,30 +102,30 @@
// upgrade pkgs
if (target == 'radosgw') {
stage('Upgrade radosgw pkgs') {
- runCephCommand(master, "I@ceph:${target}", "apt install ${target} -y ")
+ salt.cmdRun(master, "I@ceph:${target}", "apt install ${target} -y ")
}
} else {
stage("Upgrade ${target} pkgs on ${minion}") {
- runCephCommand(master, "${minion}", "apt install ceph-${target} -y")
+ salt.cmdRun(master, "${minion}", "apt install ceph-${target} -y")
}
}
// restart services
stage("Restart ${target} services on ${minion}") {
if (target == 'osd') {
- def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
- osds[0].values()[0].values()[0].each { osd,param ->
- runCephCommand(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
- waitForHealthy(master, flags)
- }
+ def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
+ osds[0].values()[0].values()[0].each { osd, param ->
+ salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
+ ceph.waitForHealthy(master, ADMIN_HOST, flags)
+ }
} else {
- runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
- waitForHealthy(master, flags)
+ salt.cmdRun(master, "${minion}", "systemctl restart ceph-${target}.target")
+ ceph.waitForHealthy(master, ADMIN_HOST, flags)
}
}
stage("Verify services for ${minion}") {
sleep(10)
- runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
+ salt.cmdRun(master, "${minion}", "systemctl status ceph-${target}.target")
}
stage('Ask for manual confirmation') {
@@ -156,32 +133,33 @@
}
}
}
- runCephCommand(master, ADMIN_HOST, "ceph versions")
+ salt.cmdRun(master, ADMIN_HOST, "ceph versions")
sleep(5)
return
}
+
timeout(time: 12, unit: 'HOURS') {
node("python") {
// create connection to salt master
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- stage ('Check user choices') {
+ stage('Check user choices') {
if (STAGE_UPGRADE_RGW.toBoolean() == true) {
// if rgw, check if other stuff has required version
def mon_ok = true
if (STAGE_UPGRADE_MON.toBoolean() == false) {
- def mon_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
+ def mon_v = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
mon_ok = mon_v.contains("${TARGET_RELEASE}") && !mon_v.contains("${ORIGIN_RELEASE}")
}
def mgr_ok = true
if (STAGE_UPGRADE_MGR.toBoolean() == false) {
- def mgr_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
+ def mgr_v = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
mgr_ok = mgr_v.contains("${TARGET_RELEASE}") && !mgr_v.contains("${ORIGIN_RELEASE}")
}
def osd_ok = true
if (STAGE_UPGRADE_OSD.toBoolean() == false) {
- def osd_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
+ def osd_v = salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
osd_ok = osd_v.contains("${TARGET_RELEASE}") && !osd_v.contains("${ORIGIN_RELEASE}")
}
if (!mon_ok || !osd_ok || !mgr_ok) {
@@ -206,29 +184,29 @@
if (flags.size() > 0) {
stage('Set cluster flags') {
for (flag in flags) {
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
}
}
}
if (STAGE_UPGRADE_MON.toBoolean() == true) {
- upgrade(pepperEnv, 'mon', flags)
+ upgrade(pepperEnv, 'mon')
}
if (STAGE_UPGRADE_MGR.toBoolean() == true) {
- upgrade(pepperEnv, 'mgr', flags)
+ upgrade(pepperEnv, 'mgr')
}
if (STAGE_UPGRADE_OSD.toBoolean() == true) {
- upgrade(pepperEnv, 'osd', flags)
+ upgrade(pepperEnv, 'osd')
}
if (STAGE_UPGRADE_RGW.toBoolean() == true) {
- upgrade(pepperEnv, 'radosgw', flags)
+ upgrade(pepperEnv, 'radosgw')
}
if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
- upgrade(pepperEnv, 'common', flags)
+ upgrade(pepperEnv, 'common')
}
// remove cluster flags
@@ -237,7 +215,7 @@
for (flag in flags) {
if (!flag.contains('sortbitwise')) {
common.infoMsg('Removing flag ' + flag)
- runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
+ salt.cmdRun(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
}
}
@@ -246,14 +224,14 @@
if (STAGE_FINALIZE.toBoolean() == true) {
stage("Finalize ceph version upgrade") {
- runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
try {
- runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
} catch (Exception e) {
common.warningMsg(e)
}
try {
- runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
+ salt.cmdRun(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
} catch (Exception e) {
common.warningMsg(e)
}
@@ -261,8 +239,8 @@
}
// wait for healthy cluster
- if (WAIT_FOR_HEALTHY.toBoolean() == true) {
- waitForHealthy(pepperEnv, flags)
+ if (WAIT_FOR_HEALTHY.toBoolean()) {
+ ceph.waitForHealthy(pepperEnv, ADMIN_HOST, flags)
}
}
}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 5f19480..1471acf 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -368,7 +368,8 @@
stage('Install infra') {
if (common.checkContains('STACK_INSTALL', 'core') ||
common.checkContains('STACK_INSTALL', 'openstack') ||
- common.checkContains('STACK_INSTALL', 'oss')) {
+ common.checkContains('STACK_INSTALL', 'oss') ||
+ common.checkContains('STACK_INSTALL', 'cicd')) {
orchestrate.installInfra(venvPepper, extra_tgt)
}
}
@@ -534,7 +535,6 @@
if (common.checkContains('STACK_INSTALL', 'cicd')) {
stage('Install Cicd') {
- orchestrate.installInfra(venvPepper, extra_tgt)
orchestrate.installCicd(venvPepper, extra_tgt)
}
}
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 80160ab..6baf15b 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -46,7 +46,9 @@
if (!keystone_creds) {
keystone_creds = validate._get_keystone_creds_v2(saltMaster)
}
- validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
+ def containerParams = ['master': saltMaster, 'target': TARGET_NODE, 'dockerImageLink': TEST_IMAGE,
+ 'name': 'cvp', 'env_var': keystone_creds, 'output_replacing': [/ (OS_PASSWORD=)(.*?)+ /]]
+ validate.runContainer(containerParams)
validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO, TEMPEST_ENDPOINT_TYPE)
}
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index edbe902..13f41ea 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -16,6 +16,7 @@
def EXTRA_PARAMS = readYaml(text: env.getProperty('EXTRA_PARAMS')) ?: [:]
def env_vars = EXTRA_PARAMS.get("envs") ?: []
+def override_config = env.getProperty('EXTRA_PARAMS') ?: ""
def IMAGE = (env.getProperty('IMAGE')) ?: 'docker-prod-local.docker.mirantis.net/mirantis/cvp/cvp-sanity-checks:stable'
def SLAVE_NODE = (env.getProperty('SLAVE_NODE')) ?: 'docker'
@@ -62,7 +63,8 @@
"SALT_USERNAME=${creds.username}",
"SALT_PASSWORD=${creds.password}",
"SALT_URL=${SALT_MASTER_URL}",
- "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
+ "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt",
+ "OVERRIDE_CONFIG=${override_config}"
] + env_vars
// Generating final config
diff --git a/opencontrail4-update.groovy b/opencontrail4-update.groovy
index 01aae14..f71b7be 100644
--- a/opencontrail4-update.groovy
+++ b/opencontrail4-update.groovy
@@ -14,6 +14,7 @@
python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+def askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
def supportedOcTargetVersions = ['4.0', '4.1']
def neutronServerPkgs = 'neutron-plugin-contrail,contrail-heat,python-contrail'
def config4Services = ['zookeeper', 'contrail-webui-middleware', 'contrail-webui', 'contrail-api', 'contrail-schema', 'contrail-svc-monitor', 'contrail-device-manager', 'contrail-config-nodemgr', 'contrail-database']
@@ -280,7 +281,9 @@
}
stage('Confirm update on sample nodes') {
- input message: "Do you want to continue with the Opencontrail components update on compute sample nodes? ${cmpTargetFirstSubset}"
+ if (askConfirmation) {
+ input message: "Do you want to continue with the Opencontrail components update on compute sample nodes? ${cmpTargetFirstSubset}"
+ }
}
stage("Opencontrail compute update on sample nodes") {
@@ -289,8 +292,9 @@
}
stage('Confirm update on all remaining target nodes') {
-
- input message: "Do you want to continue with the Opencontrail components update on all targeted compute nodes? Node list: ${cmpTargetSecondSubset}"
+ if (askConfirmation) {
+ input message: "Do you want to continue with the Opencontrail components update on all targeted compute nodes? Node list: ${cmpTargetSecondSubset}"
+ }
}
stage("Opencontrail compute update on all targeted nodes") {
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index c7a90db..9b2d760 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -70,10 +70,10 @@
common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
}
- pillar = salt.getPillar(master, "I@elasticsearch:client ${extra_tgt}", 'elasticsearch:client:server:scheme')
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:scheme'))
def elasticsearch_scheme
- if (!pillar['return'].isEmpty()) {
- elasticsearch_scheme = pillar['return'][0].values()[0]
+ if(pillar) {
+ elasticsearch_scheme = pillar
common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
} else {
common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
@@ -229,5 +229,9 @@
}
}
}
+ stage('Post upgrade steps') {
+ common.infoMsg('Apply workaround for PROD-33878')
+ salt.runSaltProcessStep(pepperEnv, "I@fluentd:agent and I@rabbitmq:server", "service.restart", "td-agent", null, true)
+ }
}
}
diff --git a/update-ceph.groovy b/update-ceph.groovy
index c26c229..55407f5 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -7,43 +7,20 @@
*/
pepperEnv = "pepperEnv"
-salt = new com.mirantis.mk.Salt()
-def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def ceph = new com.mirantis.mk.Ceph()
def python = new com.mirantis.mk.Python()
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
def packages
def command
def commandKwargs
def selMinions = []
-def check_mon
-
-def runCephCommand(master, target, cmd) {
- return salt.cmdRun(master, target, cmd)
-}
-
-def waitForHealthy(master, tgt, count = 0, attempts=100) {
- // wait for healthy cluster
- common = new com.mirantis.mk.Common()
- while (count<attempts) {
- def health = runCephCommand(master, tgt, 'ceph health')['return'][0].values()[0]
- if (health.contains('HEALTH_OK') || health.contains('HEALTH_WARN noout flag(s) set\n')) {
- common.infoMsg('Cluster is healthy')
- break;
- }
- count++
- sleep(10)
- }
-}
+def flags = CLUSTER_FLAGS ? CLUSTER_FLAGS.tokenize(',') : []
timeout(time: 12, unit: 'HOURS') {
node() {
try {
-
def targets = ["common": "ceph-common", "osd": "ceph-osd", "mon": "ceph-mon",
- "mgr":"ceph-mgr", "radosgw": "radosgw"]
+ "mgr" : "ceph-mgr", "radosgw": "radosgw"]
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
@@ -52,13 +29,13 @@
stage('Apply package upgrades on all nodes') {
targets.each { key, value ->
- // try {
- command = "pkg.install"
- packages = value
- commandKwargs = ['only_upgrade': 'true','force_yes': 'true']
- target = "I@ceph:${key}"
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
- salt.printSaltCommandResult(out)
+ // try {
+ command = "pkg.install"
+ packages = value
+ commandKwargs = ['only_upgrade': 'true', 'force_yes': 'true']
+ target = "I@ceph:${key}"
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
+ salt.printSaltCommandResult(out)
}
}
@@ -66,13 +43,13 @@
selMinions = salt.getMinions(pepperEnv, "I@ceph:mon")
for (tgt in selMinions) {
// runSaltProcessStep 'service.restart' don't work for this services
- runCephCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
- waitForHealthy(pepperEnv, tgt)
+ salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-mon.target")
+ ceph.waitForHealthy(pepperEnv, tgt, flags)
}
selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
for (tgt in selMinions) {
- runCephCommand(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
- waitForHealthy(pepperEnv, tgt)
+ salt.cmdRun(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
+ ceph.waitForHealthy(pepperEnv, tgt, flags)
}
}
@@ -89,15 +66,16 @@
osd_ids.add('osd.' + osd_id)
}
- runCephCommand(pepperEnv, tgt, 'ceph osd set noout')
+ salt.cmdRun(pepperEnv, tgt, 'ceph osd set noout')
+ flags = 'noout' in flags ? flags : flags + ['noout']
for (i in osd_ids) {
- salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
// wait for healthy cluster
- waitForHealthy(pepperEnv, tgt)
+ ceph.waitForHealthy(pepperEnv, tgt, flags, 0, 100)
}
- runCephCommand(pepperEnv, tgt, 'ceph osd unset noout')
+ salt.cmdRun(pepperEnv, tgt, 'ceph osd unset noout')
}
}
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 203c7af..9e65fca 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -22,6 +22,8 @@
def pipelineTimeout = 12
venvPepper = "venvPepper"
workspace = ""
+def saltMastURL = ''
+def saltMastCreds = ''
def triggerMirrorJob(String jobName, String reclassSystemBranch) {
params = jenkinsUtils.getJobParameters(jobName)
@@ -78,7 +80,7 @@
return threads['return'][0].values()[0].replaceAll('Salt command execution success','').trim()
}
-def wa29352(ArrayList saltMinions, String cname) {
+def wa29352(String cname) {
// WA for PROD-29352. Issue cause due patch https://gerrit.mcp.mirantis.com/#/c/37932/12/openssh/client/root.yml
// Default soft-param has been removed, what now makes not possible to render some old env's.
// Like fix, we found copy-paste already generated key from backups, to secrets.yml with correct key name
@@ -141,7 +143,6 @@
return
}
salt.fullRefresh(venvPepper, 'I@salt:master')
- salt.fullRefresh(venvPepper, 'I@nova:compute')
for (String minion in saltMinions) {
// First attempt, second will be performed in next validateReclassModel() stages
try {
@@ -184,8 +185,8 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
"grep -q '${wa29155ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29155ClassName' infra/secrets.yml")
- salt.fullRefresh(venvPepper, 'cfg*')
- salt.fullRefresh(venvPepper, 'cmp*')
+ salt.fullRefresh(venvPepper, 'I@salt:master')
+ salt.fullRefresh(venvPepper, saltMinions)
patched = true
}
}
@@ -273,6 +274,35 @@
}
}
+def wa33930_33931(String cluster_name) {
+ def openstackControlFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/control.yml"
+ def fixName = 'clients_common_wa33930_33931'
+ def fixFile = "/srv/salt/reclass/classes/cluster/${cluster_name}/openstack/${fixName}.yml"
+ def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster\\.${cluster_name}\\.openstack\\.${fixName}\$' ${openstackControlFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ if (! containsFix) {
+ def fixContext = [
+ 'classes': [ 'service.nova.client', 'service.glance.client', 'service.neutron.client' ]
+ ]
+ if (salt.getMinions(venvPepper, 'I@manila:api:enabled')) {
+ fixContext['classes'] << 'service.manila.client'
+ }
+ if (salt.getMinions(venvPepper, 'I@ironic:api:enabled')) {
+ fixContext['classes'] << 'service.ironic.client'
+ }
+ if (salt.getMinions(venvPepper, 'I@gnocchi:server:enabled')) {
+ fixContext['classes'] << 'service.gnocchi.client'
+ }
+ if (salt.getMinions(venvPepper, 'I@barbican:server:enabled')) {
+ fixContext['classes'] << 'service.barbican.client.single'
+ }
+ def _tempFile = '/tmp/wa33930_33931' + UUID.randomUUID().toString().take(8)
+ writeYaml file: _tempFile , data: fixContext
+ def fixFileContent = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "echo '${fixFileContent}' | base64 -d > ${fixFile}", false, null, false)
+ salt.cmdRun(venvPepper, 'I@salt:master', "sed -i '/^parameters:/i - cluster.${cluster_name}.openstack.${fixName}' ${openstackControlFile}")
+ }
+}
+
def archiveReclassInventory(filename) {
def _tmp_file = '/tmp/' + filename + UUID.randomUUID().toString().take(8)
// jenkins may fail at overheap. Compress data with gzip like WA
@@ -359,8 +389,6 @@
gitTargetMcpVersion = "release/${targetMcpVersion}"
}
common.warningMsg("gitTargetMcpVersion has been changed to:${gitTargetMcpVersion}")
- def saltMastURL = ''
- def saltMastCreds = ''
def upgradeSaltStack = ''
def updateClusterModel = ''
def updatePipelines = ''
@@ -368,7 +396,9 @@
def reclassSystemBranch = ''
def reclassSystemBranchDefault = gitTargetMcpVersion
def batchSize = ''
- if (gitTargetMcpVersion != 'proposed') {
+ if (gitTargetMcpVersion ==~ /^\d\d\d\d\.\d\d?\.\d+$/) {
+ reclassSystemBranchDefault = "tags/${gitTargetMcpVersion}"
+ } else if (gitTargetMcpVersion != 'proposed') {
reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
}
def driveTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
@@ -401,6 +431,7 @@
if (!batchSize) {
batchSize = getWorkerThreads(venvPepper)
}
+ def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
stage('Update Reclass and Salt-Formulas') {
common.infoMsg('Perform: Full salt sync')
@@ -497,6 +528,7 @@
wa32182(cluster_name)
wa33771(cluster_name)
wa33867(cluster_name)
+ wa33930_33931(cluster_name)
// Add new defaults
common.infoMsg("Add new defaults")
salt.cmdRun(venvPepper, 'I@salt:master', "grep '^ mcp_version: ' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
@@ -526,8 +558,7 @@
input message: 'Continue anyway?'
}
- wa29352(minions, cluster_name)
- def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
+ wa29352(cluster_name)
wa29155(computeMinions, cluster_name)
try {
@@ -541,6 +572,8 @@
salt.fullRefresh(venvPepper, 'I@salt:master')
salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true, true, null, false, 60, 2)
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${cluster_name} && git status && " +
+ "git add -u && git commit --allow-empty -m 'Reclass nodes update to the release ${targetMcpVersion} on ${common.getDatetime()}'")
try {
salt.enforceState(venvPepper, 'I@salt:master', 'reclass', true, true, null, false, 60, 2)
}
@@ -642,15 +675,28 @@
salt.enforceState(venvPepper, 'I@salt:master', 'nginx', true, true, null, false, 60, 2)
}
- // Apply changes for HaProxy on CI/CD nodes
- salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+ // Gerrit 2019.2.0 (2.13.6) version has wrong file name for download-commands plugin and was not loaded, let's remove if still there before upgrade
+ def gerritGlusterPath = salt.getPillar(venvPepper, 'I@gerrit:client', 'glusterfs:client:volumes:gerrit:path').get('return')[0].values()[0]
+ def wrongPluginJarName = "${gerritGlusterPath}/plugins/project-download-commands.jar"
+ salt.cmdRun(venvPepper, 'I@gerrit:client', "test -f ${wrongPluginJarName} && rm ${wrongPluginJarName} || true")
salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
-
- sleep(180)
-
+ }
+ }
+ catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+ }
+ // docker.client state may trigger change of jenkins master or jenkins slave services,
+ // so we need wait for slave to reconnect and continue pipeline
+ sleep(180)
+ node('python') {
+ try {
+ stage('Update Drivetrain: Phase 2') {
+ python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
common.infoMsg('Perform: Checking if Docker containers are up')
-
try {
common.retry(20, 30) {
salt.cmdRun(venvPepper, 'I@jenkins:client and I@docker:client', "! docker service ls | tail -n +2 | grep -v -E '\\s([0-9])/\\1\\s'")
@@ -660,6 +706,9 @@
error("Docker containers for CI/CD services are having troubles with starting.")
}
+ // Apply changes for HaProxy on CI/CD nodes
+ salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+
salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true, true, null, false, 60, 2)
// update Nginx proxy settings for Jenkins/Gerrit if needed