Merge "Pipeline for promotion images from dev to prod repositories"
diff --git a/backupninja-backup-pipeline.groovy b/backupninja-backup-pipeline.groovy
index 80467d4..1af5e5b 100644
--- a/backupninja-backup-pipeline.groovy
+++ b/backupninja-backup-pipeline.groovy
@@ -2,26 +2,135 @@
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+def askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
+def backupSaltMasterAndMaas = (env.getProperty('BACKUP_SALTMASTER_AND_MAAS') ?: true).toBoolean()
+def backupDogtag = (env.getProperty('BACKUP_DOGTAG') ?: true).toBoolean()
+def saltMasterTargetMatcher = "I@backupninja:client and I@salt:master"
+def dogtagTagetMatcher = "I@backupninja:client and I@dogtag:server"
+logBackupSuccess = []
+logBackupFailure = []
+
+def checkBackupninjaLog(output, backupName='', terminateOnFailure=true) {
+ def common = new com.mirantis.mk.Common()
+ def outputPattern = java.util.regex.Pattern.compile("\\d+")
+ def outputMatcher = outputPattern.matcher(output)
+ if (outputMatcher.find()) {
+ try {
+ result = outputMatcher.getAt([0, 1, 2, 3])
+ if (result[1] != null && result[1] instanceof String && result[1].isInteger() && (result[1].toInteger() < 1)) {
+ common.successMsg("[${backupName}] - Backup successfully finished " + result[1] + " fatals, " + result[2] + " errors " + result[3] + " warnings.")
+ logBackupSuccess.add(backupName)
+ } else {
+ common.errorMsg("[${backupName}] - Backup failed. Found " + result[1] + " fatals, " + result[2] + " errors " + result[3] + " warnings.")
+ logBackupFailure.add(backupName)
+ }
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg("[${backupName}] - Backupninja log parsing failed.")
+ logBackupFailure.add(backupName)
+ }
+ }
+}
timeout(time: 12, unit: 'HOURS') {
node() {
+ def saltMasterBackupNode = ''
+ def dogtagBackupNode = ''
+ def backupServer = ''
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ stage('Verify pillar for backups') {
+ if (backupSaltMasterAndMaas) {
+ try {
+ def masterPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:master:initial_data')
+ if (masterPillar['return'].isEmpty()) {
+ throw new Exception("Problem with salt-master pillar on 'I@salt:master' node.")
+ }
+ def minionPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
+ if (minionPillar['return'].isEmpty()) {
+ throw new Exception("Problem with salt-minion pillar on I@salt:master node.")
+ }
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/salt-master.html')
+ return
+ }
+ }
+ if (backupDogtag) {
+ try {
+ def dogtagPillar = salt.getPillar(pepperEnv, "I@salt:master", "dogtag:server")
+ if (dogtagPillar['return'].isEmpty()) {
+ throw new Exception("Problem with dogtag pillar on I@dogtag:server node.")
+ }
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg("Looks like dogtag pillar is not defined. Fix your pillar or disable dogtag backup by setting the BACKUP_DOGTAG parameter to False if you're using different barbican backend.")
+ return
+ }
+ }
+ }
stage('Check backup location') {
- try{
- backupNode = salt.getMinions(pepperEnv, "I@backupninja:client")[0]
- salt.minionsReachable(pepperEnv, "I@salt:master", backupNode)
+ if (backupSaltMasterAndMaas) {
+ try {
+ saltMasterBackupNode = salt.getMinionsSorted(pepperEnv, saltMasterTargetMatcher)[0]
+ salt.minionsReachable(pepperEnv, "I@salt:master", saltMasterBackupNode)
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg("Pipeline wasn't able to detect backupninja:client pillar on Salt master node or the minion is not reachable")
+ currentBuild.result = "FAILURE"
+ return
+ }
+
+ def maasNodes = salt.getMinions(pepperEnv, 'I@maas:server')
+ if (!maasNodes.isEmpty()) {
+ def postgresqlMajorVersion = salt.getPillar(pepperEnv, 'I@salt:master', '_param:postgresql_major_version').get('return')[0].values()[0]
+ if (! postgresqlMajorVersion) {
+ common.errorMsg("Can't get _param:postgresql_major_version parameter, which is required to determine postgresql-client version. Is it defined in pillar?")
+ if (askConfirmation) {
+ input message: "Confirm to proceed anyway."
+ }
+ } else {
+ def postgresqlClientPackage = "postgresql-client-${postgresqlMajorVersion}"
+ try {
+ if (!salt.isPackageInstalled(['saltId': pepperEnv, 'target': saltMasterBackupNode, 'packageName': postgresqlClientPackage, 'output': false])) {
+ if (askConfirmation) {
+                                input message: "Do you want to install ${postgresqlClientPackage} package on targeted nodes: ${saltMasterBackupNode}? It's required to make backup. Click to confirm."
+ } else {
+                                common.infoMsg("Package ${postgresqlClientPackage} will be installed. It's required to make backup.")
+ }
+ // update also common fake package
+ salt.runSaltProcessStep(pepperEnv, saltMasterBackupNode, 'pkg.install', ["postgresql-client,${postgresqlClientPackage}"])
+ }
+ } catch (Exception e) {
+                        common.errorMsg("Unable to determine status of ${postgresqlClientPackage} packages on target nodes: ${saltMasterBackupNode}.")
+ if (askConfirmation) {
+ input message: "Do you want to continue? Click to confirm"
+ }
+ }
+ }
+ }
}
- catch (Exception e) {
- common.errorMsg(e.getMessage())
- common.errorMsg("Pipeline wasn't able to detect backupninja:client pillar or the minion is not reachable")
- currentBuild.result = "FAILURE"
- return
+ if (backupDogtag) {
+ try {
+ dogtagBackupNode = salt.getMinionsSorted(pepperEnv, dogtagTagetMatcher)[0]
+ salt.minionsReachable(pepperEnv, "I@salt:master", dogtagBackupNode)
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg("Pipeline wasn't able to detect node with backupninja:client and dogtag:server pillars defined or the minion is not reachable")
+ currentBuild.result = "FAILURE"
+ return
+ }
}
- try{
- backupServer = salt.getMinions(pepperEnv, "I@backupninja:server")[0]
- salt.minionsReachable(pepperEnv, "I@salt:master", backupServer)
+
+ try {
+ backupServer = salt.getMinions(pepperEnv, "I@backupninja:server")[0]
+ salt.minionsReachable(pepperEnv, "I@salt:master", backupServer)
}
catch (Exception e) {
common.errorMsg(e.getMessage())
@@ -30,32 +139,44 @@
return
}
}
- stage ('Prepare for backup') {
+ stage('Prepare for backup') {
+ if (backupSaltMasterAndMaas) {
salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:server', 'state': 'backupninja'])
- salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:client', 'state': 'backupninja'])
+ salt.enforceState(['saltId': pepperEnv, 'target': saltMasterTargetMatcher, 'state': 'backupninja'])
+ def backupMasterSource = salt.getReturnValues(salt.getPillar(pepperEnv, saltMasterBackupNode, 'salt:master:initial_data:source'))
+ def backupMinionSource = salt.getReturnValues(salt.getPillar(pepperEnv, saltMasterBackupNode, 'salt:minion:initial_data:source'))
+ // TODO: Remove ssh-keyscan once we have openssh meta for backupninja implemented
+ [backupServer, backupMasterSource, backupMinionSource].unique().each {
+ salt.cmdRun(pepperEnv, saltMasterBackupNode, "ssh-keygen -F ${it} || ssh-keyscan -H ${it} >> /root/.ssh/known_hosts")
+ }
+ def maasNodes = salt.getMinions(pepperEnv, 'I@maas:region')
+ if (!maasNodes.isEmpty()) {
+ common.infoMsg("Trying to save maas file permissions on ${maasNodes} if possible")
+ salt.cmdRun(pepperEnv, 'I@maas:region', 'which getfacl && getfacl -pR /var/lib/maas/ > /var/lib/maas/file_permissions.txt || true')
+ }
+ }
+ if (backupDogtag) {
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@backupninja:server', 'state': 'backupninja'])
+ salt.enforceState(['saltId': pepperEnv, 'target': dogtagTagetMatcher, 'state': 'backupninja'])
+ }
}
stage('Backup') {
- def output = salt.getReturnValues(salt.cmdRun(pepperEnv, backupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
- def outputPattern = java.util.regex.Pattern.compile("\\d+")
- def outputMatcher = outputPattern.matcher(output)
- if (outputMatcher.find()) {
- try{
- result = outputMatcher.getAt([0,1,2,3])
- }
- catch (Exception e){
- common.errorMsg(e.getMessage())
- common.errorMsg("Parsing failed.")
- currentBuild.result = "FAILURE"
- return
- }
+ if (backupSaltMasterAndMaas) {
+ def output = salt.getReturnValues(salt.cmdRun(pepperEnv, saltMasterBackupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
+ checkBackupninjaLog(output, "Salt Master/MAAS")
}
- if (result[1] != null && result[1] instanceof String && result[1].isInteger() && (result[1].toInteger() < 1)){
- common.successMsg("Backup successfully finished " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings.")
+ if (backupDogtag) {
+ def output = salt.getReturnValues(salt.cmdRun(pepperEnv, dogtagBackupNode, "su root -c 'backupninja --now -d'")).readLines()[-2]
+ checkBackupninjaLog(output, "Dogtag")
}
- else {
- common.errorMsg("Backup failed. Found " + result[1] + " fatals, " + result[2] + " errors " + result[3] +" warnings.")
+ }
+ stage('Results') {
+ if (logBackupSuccess.size() > 0) {
+ common.infoMsg("Following backups finished successfully: ${logBackupSuccess.join(",")}")
+ }
+ if (logBackupFailure.size() > 0) {
+                common.errorMsg("Following backups have failed: ${logBackupFailure.join(",")}. Make sure to check the logs.")
currentBuild.result = "FAILURE"
- return
}
}
}
diff --git a/backupninja-restore-pipeline.groovy b/backupninja-restore-pipeline.groovy
index b38cd6a..a869fe3 100644
--- a/backupninja-restore-pipeline.groovy
+++ b/backupninja-restore-pipeline.groovy
@@ -2,54 +2,99 @@
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+def maasNodes = []
+def restoreSaltMasterAndMaas = (env.getProperty('RESTORE_SALTMASTER_AND_MAAS') ?: true).toBoolean()
+def restoreDogtag = (env.getProperty('RESTORE_DOGTAG') ?: true).toBoolean()
timeout(time: 12, unit: 'HOURS') {
node() {
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- stage('Salt-Master restore') {
- common.infoMsg('Verify pillar for salt-master backups')
- try {
- def masterPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
- if(masterPillar['return'].isEmpty()) {
- throw new Exception('Problem with salt-master pillar.')
+ stage('Verify pillar for restore') {
+ if (restoreSaltMasterAndMaas) {
+ try {
+ def masterPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:master:initial_data')
+ if(masterPillar['return'].isEmpty()) {
+ throw new Exception("Problem with salt-master pillar on 'I@salt:master' node.")
+ }
+ def minionPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
+ if(minionPillar['return'].isEmpty()) {
+ throw new Exception("Problem with salt-minion pillar on 'I@salt:master' node.")
+ }
}
- def minionPillar = salt.getPillar(pepperEnv, "I@salt:master", 'salt:minion:initial_data')
- if(minionPillar['return'].isEmpty()) {
- throw new Exception('Problem with salt-minion pillar.')
+ catch (Exception e){
+ common.errorMsg(e.getMessage())
+ common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/salt-master/salt-master-restore.html')
+ return
+ }
+ maasNodes = salt.getMinions(pepperEnv, 'I@maas:region')
+ }
+ if (!maasNodes.isEmpty()) {
+ try {
+ def maaSPillar = salt.getPillar(pepperEnv, "I@maas:region", 'maas:region:database:initial_data')
+ if (maaSPillar['return'].isEmpty()) {
+ throw new Exception("Problem with MaaS pillar on 'I@maas:region' node.")
+ }
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/maas-postgresql/backupninja-postgresql-restore.html')
+ return
+ }
+ } else {
+ common.warningMsg("No MaaS Pillar was found. You can ignore this if it's expected. Otherwise you should fix you pillar. Check: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/maas-postgresql/backupninja-postgresql-restore.html")
+ }
+ if (restoreDogtag) {
+ try {
+ def dogtagPillar = salt.getPillar(pepperEnv, "I@dogtag:server:role:master", 'dogtag:server:initial_data')
+ if (dogtagPillar['return'].isEmpty()) {
+ throw new Exception("Problem with Dogtag pillar on 'I@dogtag:server:role:master' node.")
+ }
+ }
+ catch (Exception e) {
+ common.errorMsg(e.getMessage())
+ common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/dogtag/restore-dogtag.html')
+ return
}
}
- catch (Exception e){
- common.errorMsg(e.getMessage())
- common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/salt-master/salt-master-restore.html')
- return
- }
- common.infoMsg('Performing restore')
- salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.master.restore'])
- salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.minion.restore'])
- salt.fullRefresh(pepperEnv, '*')
-
- common.infoMsg('Validating output')
- common.infoMsg('Salt-Keys')
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key")
- common.infoMsg('Salt-master CA')
- salt.cmdRun(pepperEnv, 'I@salt:master', "ls -la /etc/pki/ca/salt_master_ca/")
}
- stage('MAAS Restore') {
- common.infoMsg('Verify pillar for MaaS backup')
- try {
- def maaSPillar = salt.getPillar(pepperEnv, "I@maas:server", 'maas:region:database:initial_data')
- if(maaSPillar['return'].isEmpty()) {
- throw new Exception('Problem with MaaS pillar.')
+ stage('Restore') {
+ if (restoreSaltMasterAndMaas) {
+ common.infoMsg('Starting salt-master restore')
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.master.restore'])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': 'salt.minion.restore'])
+ salt.fullRefresh(pepperEnv, '*')
+ common.infoMsg('Validating output')
+ common.infoMsg('Salt-Keys')
+ salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key")
+ common.infoMsg('Salt-master CA')
+ salt.cmdRun(pepperEnv, 'I@salt:master', "ls -la /etc/pki/ca/salt_master_ca/")
+ if (!maasNodes.isEmpty()) {
+ common.infoMsg('Starting MaaS restore')
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@maas:region', 'state': 'maas.region'])
}
}
- catch (Exception e){
- common.errorMsg(e.getMessage())
- common.errorMsg('Please fix your pillar. For more information check docs: https://docs.mirantis.com/mcp/latest/mcp-operations-guide/backup-restore/backupninja-postgresql/backupninja-postgresql-restore.html')
- return
+ if (restoreDogtag) {
+ salt.runSaltProcessStep(pepperEnv, 'I@dogtag:server:role:slave', 'service.stop', ['dirsrv@pki-tomcat.service'])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@dogtag:server:role:master', 'state': 'dogtag.server.restore'])
+ salt.runSaltProcessStep(pepperEnv, 'I@dogtag:server:role:slave', 'service.start', ['dirsrv@pki-tomcat.service'])
}
- salt.enforceState(['saltId': pepperEnv, 'target': 'I@maas:region', 'state': 'maas.region'])
+ }
+ stage('After restore steps') {
+ if (restoreSaltMasterAndMaas) {
+ common.infoMsg("No more steps for Salt Master and MaaS restore are required.")
+ }
+ if (restoreDogtag) {
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@salt:master', 'state': ['salt', 'reclass']])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@dogtag:server:role:master', 'state': 'dogtag.server'])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@dogtag:server', 'state': 'dogtag.server'])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@haproxy:proxy', 'state': 'haproxy'])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@barbican:server:role:primary', 'state': 'barbican.server'])
+ salt.enforceState(['saltId': pepperEnv, 'target': 'I@barbican:server', 'state': 'barbican.server'])
+ salt.cmdRun(pepperEnv, 'I@barbican:server', 'rm /etc/barbican/alias/*')
+ salt.runSaltProcessStep(pepperEnv, 'I@barbican:server', 'service.restart', 'apache2')
+ }
}
}
}
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
index 96ca29d..4bbb78d 100644
--- a/ceph-add-osd-upmap.groovy
+++ b/ceph-add-osd-upmap.groovy
@@ -64,7 +64,8 @@
stage ("verify client versions")
{
- def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not E@mon*")
+ // I@docker:swarm and I@prometheus:server - mon* nodes
+ def nodes = salt.getMinions("pepperEnv", "I@ceph:common and not ( I@docker:swarm and I@prometheus:server )")
for ( node in nodes )
{
def versions = salt.cmdRun("pepperEnv", node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e616a28..766dda1 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -90,6 +90,10 @@
stage('Remove Ceph RGW') {
salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
}
+
+ stage('Purge Ceph RGW pkgs') {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
+ }
}
if (HOST_TYPE.toLowerCase() != 'osd') {
@@ -222,7 +226,7 @@
// purge Ceph pkgs
stage('Purge Ceph OSD pkgs') {
- runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
}
stage('Remove OSD host from crushmap') {
@@ -294,9 +298,14 @@
salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
}
}
+
+ stage('Purge Ceph MON pkgs') {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
+ }
}
- if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
+ def crushmap_target = salt.getMinions(pepperEnv, "I@ceph:setup:crush")
+ if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true && crushmap_target ) {
stage('Generate CRUSHMAP') {
salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
}
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
index 169bbd0..66e9422 100644
--- a/ceph-remove-osd.groovy
+++ b/ceph-remove-osd.groovy
@@ -56,10 +56,14 @@
}
}
if (partition?.trim()) {
- // dev = /dev/sdi
+ def part_id
+ if (partition.contains("nvme")) {
+ part_id = partition.substring(partition.lastIndexOf("p")+1).replaceAll("[^0-9]+", "")
+ }
+ else {
+ part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
+ }
def dev = partition.replaceAll('\\d+$', "")
- // part_id = 2
- def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]+", "")
runCephCommand(master, target, "Ignore | parted ${dev} rm ${part_id}")
}
return
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..a50c253 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -16,6 +16,9 @@
* STAGE_UPGRADE_OSD Set to True if Ceph osd nodes upgrade is desired
* STAGE_UPGRADE_RGW Set to True if Ceph rgw nodes upgrade is desired
* STAGE_UPGRADE_CLIENT Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
+ * STAGE_FINALIZE Set to True if configs recommended for TARGET_RELEASE should be set after upgrade is done
+ * BACKUP_ENABLED Select to copy the disks of Ceph VMs before upgrade and backup Ceph directories on OSD nodes
+ * BACKUP_DIR Select the target dir to backup to when BACKUP_ENABLED
*
*/
@@ -30,20 +33,27 @@
return salt.cmdRun(master, target, cmd)
}
-def waitForHealthy(master, count=0, attempts=300) {
+def waitForHealthy(master, flags, count=0, attempts=300) {
// wait for healthy cluster
while (count<attempts) {
def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
if (health.contains('HEALTH_OK')) {
common.infoMsg('Cluster is healthy')
break;
+ } else {
+ for (flag in flags) {
+ if (health.contains(flag + ' flag(s) set') && !(health.contains('down'))) {
+ common.infoMsg('Cluster is healthy')
+ return;
+ }
+ }
}
count++
sleep(10)
}
}
-def backup(master, target) {
+def backup(master, flags, target) {
stage("backup ${target}") {
if (target == 'osd') {
@@ -69,14 +79,14 @@
def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
def minionProvider = provider_pillar['return'][0].values()[0]
- waitForHealthy(master)
+ waitForHealthy(master, flags)
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
} catch (Exception e) {
common.warningMsg('Backup already exists')
}
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak")
} catch (Exception e) {
common.warningMsg('Backup already exists')
}
@@ -86,14 +96,14 @@
common.warningMsg(e)
}
salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
- waitForHealthy(master)
+ waitForHealthy(master, flags)
}
}
}
return
}
-def upgrade(master, target) {
+def upgrade(master, target, flags) {
stage("Change ${target} repos") {
salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
@@ -124,15 +134,25 @@
}
// restart services
stage("Restart ${target} services on ${minion}") {
- runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
+ if (target == 'osd') {
+ def osds = salt.getGrain(master, "${minion}", 'ceph:ceph_disk').values()[0]
+ osds[0].values()[0].values()[0].each { osd,param ->
+ runCephCommand(master, "${minion}", "systemctl restart ceph-${target}@${osd}")
+ waitForHealthy(master, flags)
+ }
+ } else {
+ runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
+ waitForHealthy(master, flags)
+ }
}
stage("Verify services for ${minion}") {
sleep(10)
- runCephCommand(master, ADMIN_HOST, "ceph -s")
+ runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
}
stage('Ask for manual confirmation') {
+ runCephCommand(master, ADMIN_HOST, "ceph -s")
input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
}
}
@@ -193,23 +213,23 @@
}
if (STAGE_UPGRADE_MON.toBoolean() == true) {
- upgrade(pepperEnv, 'mon')
+ upgrade(pepperEnv, 'mon', flags)
}
if (STAGE_UPGRADE_MGR.toBoolean() == true) {
- upgrade(pepperEnv, 'mgr')
+ upgrade(pepperEnv, 'mgr', flags)
}
if (STAGE_UPGRADE_OSD.toBoolean() == true) {
- upgrade(pepperEnv, 'osd')
+ upgrade(pepperEnv, 'osd', flags)
}
if (STAGE_UPGRADE_RGW.toBoolean() == true) {
- upgrade(pepperEnv, 'radosgw')
+ upgrade(pepperEnv, 'radosgw', flags)
}
if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
- upgrade(pepperEnv, 'common')
+ upgrade(pepperEnv, 'common', flags)
}
// remove cluster flags
@@ -243,7 +263,7 @@
// wait for healthy cluster
if (WAIT_FOR_HEALTHY.toBoolean() == true) {
- waitForHealthy(pepperEnv)
+ waitForHealthy(pepperEnv, flags)
}
}
}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index d59f313..d5b991c 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -46,6 +46,10 @@
* SALT_VERSION Version of Salt which is going to be installed i.e. 'stable 2016.3' or 'stable 2017.7' etc.
*
* EXTRA_TARGET The value will be added to target nodes
+ * BATCH_SIZE Use batching for states, which may be targeted for huge amount of nodes. Format:
+ - 10 - number of nodes
+ - 10% - percentage of all targeted nodes
+
*
* Test settings:
* TEST_K8S_API_SERVER Kubernetes API address
@@ -105,6 +109,10 @@
if (common.validInputParam('EXTRA_TARGET')) {
extra_tgt = "${EXTRA_TARGET}"
}
+def batch_size = ''
+if (common.validInputParam('BATCH_SIZE')) {
+ batch_size = "${BATCH_SIZE}"
+}
timeout(time: 12, unit: 'HOURS') {
node(slave_node) {
@@ -347,9 +355,12 @@
//
// Install
//
+ if (!batch_size) {
+ batch_size = salt.getWorkerThreads(venvPepper)
+ }
// Check if all minions are reachable and ready
- salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*'])
+ salt.checkTargetMinionsReady(['saltId': venvPepper, 'target': '*', batch: batch_size])
if (common.checkContains('STACK_INSTALL', 'core')) {
stage('Install core infrastructure') {
@@ -357,7 +368,7 @@
if (common.validInputParam('STATIC_MGMT_NETWORK')) {
staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
}
- orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt)
+ orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork, extra_tgt, batch_size)
if (common.checkContains('STACK_INSTALL', 'kvm')) {
orchestrate.installInfraKvm(venvPepper, extra_tgt)
@@ -384,7 +395,7 @@
// install k8s
if (common.checkContains('STACK_INSTALL', 'k8s')) {
extra_tgt_bckp = extra_tgt
- extra_tgt = 'and not kdt* and not cfg* ' + extra_tgt_bckp
+ extra_tgt = 'and not kdt* and not I@salt:master ' + extra_tgt_bckp
stage('Install Kubernetes infra') {
if (STACK_TYPE == 'aws') {
// configure kubernetes_control_address - save loadbalancer
@@ -508,8 +519,9 @@
// Workaround for PROD-17765 issue to prevent crashes of keystone.role_present state.
// More details: https://mirantis.jira.com/browse/PROD-17765
- salt.runSaltProcessStep(venvPepper, "I@keystone:client ${extra_tgt}", 'service.restart', ['salt-minion'])
- salt.minionsReachable(venvPepper, "I@salt:master and *01* ${extra_tgt}", 'I@keystone:client', null, 10, 6)
+ salt.restartSaltMinion(venvPepper, "I@keystone:client ${extra_tgt}")
+ //
+ salt.minionsReachable(venvPepper, 'I@salt:master', 'I@keystone:client ${extra_tgt}', null, 10, 6)
stage('Install OpenStack network') {
@@ -539,7 +551,7 @@
}
stage('Install OpenStack compute') {
- orchestrate.installOpenstackCompute(venvPepper, extra_tgt)
+ orchestrate.installOpenstackCompute(venvPepper, extra_tgt, batch_size)
if (common.checkContains('STACK_INSTALL', 'contrail')) {
orchestrate.installContrailCompute(venvPepper, extra_tgt)
@@ -560,7 +572,7 @@
if (common.checkContains('STACK_INSTALL', 'cicd')) {
stage('Install Cicd') {
extra_tgt_bckp = extra_tgt
- extra_tgt = 'and cid* ' + extra_tgt_bckp
+ extra_tgt = 'and I@_param:drivetrain_role:cicd ' + extra_tgt_bckp
orchestrate.installInfra(venvPepper, extra_tgt)
orchestrate.installCicd(venvPepper, extra_tgt)
extra_tgt = extra_tgt_bckp
@@ -612,7 +624,7 @@
test.executeConformance(config)
} else {
def output_file = image.replaceAll('/', '-') + '.output'
- def target = "ctl01* ${extra_tgt}"
+ def target = "I@keystone:server:role:primary ${extra_tgt}"
def conformance_output_file = 'conformance_test.tar'
// run image
@@ -642,7 +654,7 @@
"py.test --junit-xml=${report_dir}report.xml" +
" --html=${report_dir}report.html -v vapor/tests/ -k 'not destructive' "
- salt.runSaltProcessStep(venvPepper, 'cfg*', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'saltutil.refresh_pillar', [], null, true)
salt.enforceState(venvPepper, 'I@opencontrail:test' , 'opencontrail.test' , true)
salt.cmdRun(venvPepper, 'I@opencontrail:test', cmd, false)
@@ -659,7 +671,7 @@
def gluster_compound = "I@glusterfs:server ${extra_tgt}"
def salt_ca_compound = "I@salt:minion:ca:salt_master_ca ${extra_tgt}"
// Enforce highstate asynchronous only on the nodes which are not glusterfs servers
- salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound + ' and not ' + salt_ca_compound)
+ salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound + ' and not ' + salt_ca_compound, batch_size)
// Iterate over nonempty set of gluster servers and apply highstates one by one
// TODO: switch to batch once salt 2017.7+ would be used
def saltcaMinions = salt.getMinionsSorted(venvPepper, salt_ca_compound)
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 0ea5fea..9945d33 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -190,6 +190,70 @@
}
} else {
if (distUpgrade) {
+ common.infoMsg("Checking availability of Linux HWE Kernel...")
+ def switchHwe = false
+ def nodesOut = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], 'linux_kernel_switch.check_hwe_kernel').get('return')[0]
+ def targetHWE = []
+ for (node in nodesOut) {
+ def nodeName = node.getKey()
+ def statusPkgs = node.getValue()
+ if (statusPkgs) {
+ statusPkgs.each { pkg, pkgStatus ->
+ if (pkgStatus instanceof String) {
+ common.warningMsg("Target ${nodeName} has no installed Linux HWE Kernel package: ${pkg}")
+ if (! targetHWE.contains(nodeName)) {
+ targetHWE.add(nodeName)
+ }
+ }
+ }
+ } else {
+ common.warningMsg("Target ${nodeName} has no info about Linux HWE Kernel, check formula or resync minion data.")
+ }
+ }
+ if (targetHWE) {
+ if (INTERACTIVE.toBoolean()) {
+ try {
+ input message: "Do you want to switch from generic to hwe kernel for ${targetHWE} nodes? Click to confirm", ok: 'Switch to HWE'
+ switchHwe = true
+ } catch (Exception ex) {
+ common.warningMsg("Kernel switch from generic to hwe for ${targetHWE} cancelled. Continue dist-upgrade with existing kernel.")
+ }
+ } else {
+ switchHwe = true
+ }
+ }
+ if (switchHwe) {
+ def onlyKernel='True'
+ def targetHWECompound = targetHWE.join(' or ')
+ if (INTERACTIVE.toBoolean()) {
+ try {
+ input message: "Install HWE headers and generic packages?", ok: 'Install'
+ onlyKernel='False'
+ common.infoMsg("HWE Kernel, headers and generic packages will be installed.")
+ } catch (Exception e) {
+ common.infoMsg("Only HWE Kernel packages will be installed.")
+ }
+ } else {
+ onlyKernel='False'
+ }
+ salt.runSaltCommand(pepperEnv, 'local', ['expression': targetHWECompound, 'type': 'compound'], 'linux_kernel_switch.switch_kernel', false, "only_kernel=${onlyKernel}")
+ common.infoMsg("HWE Kernel has been installed on ${targetHWE} nodes")
+ def rebootNow = true
+ if (INTERACTIVE.toBoolean()) {
+ try {
+ input message: "To finish switch on HWE kernel it is needed to reboot. Reboot nodes ${targetHWE} now?", ok: 'Reboot'
+ } catch (Exception e) {
+ common.warningMsg("HWE Kernel is not used. Please reboot nodes ${targetHWE} manually to finish kernel switch.")
+ rebootNow = false
+ }
+ }
+ if (rebootNow) {
+ common.infoMsg('Performing nodes reboot after kernel install...')
+ salt.runSaltCommand(pepperEnv, 'local', ['expression': targetHWECompound, 'type': 'compound'], 'system.reboot', null, 'at_time=1')
+ sleep(180)
+ salt.minionsReachable(pepperEnv, 'I@salt:master', targetHWECompound, null, 10, 20)
+ }
+ }
common.retry(3){
out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' dist-upgrade'])
}
@@ -335,6 +399,22 @@
}
} else {
if (distUpgrade) {
+ salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], 'linux_kernel_switch.rollback_switch_kernel', false)
+ def rebootNow = true
+ if (INTERACTIVE.toBoolean()) {
+ try {
+ input message: "To finish kernel downgrade it is needed to reboot. Reboot nodes ${target} now?", ok: 'Reboot'
+ } catch (Exception e) {
+ common.warningMsg("Please reboot nodes ${target} manually to finish kernel downgrade.")
+ rebootNow = false
+ }
+ }
+ if (rebootNow) {
+ common.infoMsg('Performing nodes reboot after kernel downgrade...')
+ salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], 'system.reboot', null, 'at_time=1')
+ sleep(180)
+ salt.minionsReachable(pepperEnv, 'I@salt:master', target, null, 10, 20)
+ }
common.retry(3){
out = salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', [args + ' dist-upgrade'])
}
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 0c657a5..80160ab 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -31,7 +31,15 @@
try{
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ if (!TARGET_NODE) {
+ // This pillar will return us cid01
+ TARGET_NODE = "I@gerrit:client"
+ }
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index b33cda6..e933984 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -37,7 +37,15 @@
try {
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ if (!TEMPEST_TARGET_NODE) {
+ // This pillar will return us cid01
+ TEMPEST_TARGET_NODE = "I@gerrit:client"
+ }
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 74c9a63..ebb7987 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -27,20 +27,29 @@
try{
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
+ if (!TARGET_NODE) {
+ // This pillar will return us cid01
+ TARGET_NODE = "I@gerrit:client"
+ }
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
+ container_name = "${env.JOB_NAME}"
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
if (!keystone_creds) {
keystone_creds = validate._get_keystone_creds_v2(saltMaster)
}
- validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
- validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
+ validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+ validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "", "internalURL", "", "", [], container_name)
}
stage('Run Rally tests') {
sh "mkdir -p ${artifacts_dir}"
- validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir)
+ validate.runCVPrally(saltMaster, TARGET_NODE, RALLY_SCENARIO_FILE, remote_artifacts_dir, "docker-rally", container_name)
}
stage('Collect results') {
@@ -55,7 +64,8 @@
throw e
} finally {
if (DEBUG_MODE == 'false') {
- validate.runCleanup(saltMaster, TARGET_NODE)
+ validate.openstack_cleanup(saltMaster, TARGET_NODE, container_name)
+ validate.runCleanup(saltMaster, TARGET_NODE, container_name)
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
}
}
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index 1b1d5e0..edbe902 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -23,7 +23,6 @@
/*
YAML example
=====
-
# commands is a map of commands which looks like step_name: shell_command
commands:
001_prepare: rm /var/lib/g.txt
@@ -35,21 +34,21 @@
- SALT_USERNAME=admin
- SALT_PASSWORD=password
- drivetrain_version=testing
-
*/
node (SLAVE_NODE) {
def artifacts_dir = 'validation_artifacts'
+ def test_suite_name = "${env.JOB_NAME}"
+ def xml_file = "${test_suite_name}_report.xml"
+
def configRun = [:]
try {
withEnv(env_vars) {
stage('Initialization') {
def container_workdir = '/var/lib'
- def test_suite_name = "${env.JOB_NAME}"
def workdir = "${container_workdir}/${test_suite_name}"
- def xml_file = "${test_suite_name}_report.xml"
def tests_set = (env.getProperty('tests_set')) ?: ''
- def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} --tb=short -vv ${tests_set}"
+ def script = "pytest --junitxml ${container_workdir}/${artifacts_dir}/${xml_file} -vv ${tests_set}"
sh "mkdir -p ${artifacts_dir}"
@@ -62,17 +61,21 @@
def env_vars_list = [
"SALT_USERNAME=${creds.username}",
"SALT_PASSWORD=${creds.password}",
- "SALT_URL=${SALT_MASTER_URL}"
+ "SALT_URL=${SALT_MASTER_URL}",
+ "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
] + env_vars
// Generating final config
+ def force_pull = (env.getProperty('force_pull')) ?: false
configRun = [
'image': IMAGE,
+ 'dockerPull': force_pull.toBoolean(),
'baseRepoPreConfig': false,
'dockerMaxCpus': 2,
'dockerExtraOpts' : [
"--network=host",
"-v /root/qa_results/:/root/qa_results/",
+ "-v /etc/ssl/certs/:/etc/ssl/certs/:ro",
"-v ${env.WORKSPACE}/${artifacts_dir}/:${container_workdir}/${artifacts_dir}/",
],
'envOpts' : env_vars_list,
@@ -93,28 +96,28 @@
style: 'line',
title: 'SPT Glance results',
xmlSeries: [[
- file: "${env.JOB_NAME}_report.xml",
+ file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+ xpath: '/testsuite/testcase[@classname="tests.test_glance"]/properties/property']]
plot csvFileName: 'plot-hw2hw.csv',
group: 'SPT',
style: 'line',
title: 'SPT HW2HW results',
xmlSeries: [[
- file: "${env.JOB_NAME}_report.xml",
+ file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+ xpath: '/testsuite/testcase[@classname="tests.test_hw2hw"]/properties/property']]
plot csvFileName: 'plot-vm2vm.csv',
group: 'SPT',
style: 'line',
title: 'SPT VM2VM results',
xmlSeries: [[
- file: "${env.JOB_NAME}_report.xml",
+ file: "${artifacts_dir}/${xml_file}",
nodeType: 'NODESET',
url: '',
- xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+ xpath: '/testsuite/testcase[@classname="tests.test_vm2vm"]/properties/property']]
}
}
}
diff --git a/cvp-shaker.groovy b/cvp-shaker.groovy
index 08f9315..1f04bc1 100644
--- a/cvp-shaker.groovy
+++ b/cvp-shaker.groovy
@@ -154,9 +154,11 @@
}
stage('Run Shaker tests') {
- if (! salt_testing.setupDockerAndTest(configRun)) {
- common.warningMsg('Docker contrainer failed to run Shaker')
- currentBuild.result = 'FAILURE'
+ timeout(time: 10, unit: 'HOURS') {
+ if (! salt_testing.setupDockerAndTest(configRun)) {
+ common.warningMsg('Docker container failed to run Shaker')
+ currentBuild.result = 'FAILURE'
+ }
}
}
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index e8eb286..c4351b9 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -19,6 +19,8 @@
* TEMPEST_ENDPOINT_TYPE Type of OS endpoint to use during test run (not in use right now)
* concurrency Number of threads to use for Tempest test run
* remote_artifacts_dir Folder to use for artifacts on remote node
+ * runtest_tempest_cfg_dir Folder to use to generate and store tempest.conf
+ * runtest_tempest_cfg_name Tempest config name
* report_prefix Some prefix to put to report name
*
*/
@@ -37,99 +39,110 @@
def DEBUG_MODE = (env.DEBUG_MODE) ?: false
def STOP_ON_ERROR = (env.STOP_ON_ERROR) ? env.STOP_ON_ERROR.toBoolean() : false
def GENERATE_CONFIG = (env.GENERATE_CONFIG) ?: true
+// do not change unless you know what you're doing
def remote_artifacts_dir = (env.remote_artifacts_dir) ?: '/root/test/'
def report_prefix = (env.report_prefix) ?: ''
def args = ''
+def mounts = [:]
node() {
- try{
- stage('Initialization') {
- deleteDir()
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
- os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
- if (os_version == '') {
- throw new Exception("Openstack is not found on this env. Exiting")
- }
- TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
- runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
- if (runtest_node.values()[0]) {
- // Let's use Service node that was defined in reclass. If several nodes are defined
- // we will use the first from salt output
- common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
- SERVICE_NODE = runtest_node.keySet()[0]
- }
- else {
- common.infoMsg("Service node is not defined in reclass")
- SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
- common.infoMsg("${SERVICE_NODE} will be used as Service node")
- def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
- fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
- common.infoMsg("Full service node name ${fullnodename}")
+ stage('Initialization') {
+ deleteDir()
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ container_name = "${env.JOB_NAME}"
+ cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
+ os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("Openstack is not found on this env. Exiting")
+ }
+ TEST_IMAGE = (env.TEST_IMAGE) ?: "docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:${os_version}"
+ runtest_node = salt.runSaltProcessStep(saltMaster, 'I@runtest:*', 'test.ping')['return'][0]
+ if (runtest_node.values()[0]) {
+ // Let's use Service node that was defined in reclass. If several nodes are defined
+ // we will use the first from salt output
+ common.infoMsg("Service node ${runtest_node.keySet()[0]} is defined in reclass")
+ SERVICE_NODE = runtest_node.keySet()[0]
+ }
+ else {
+ throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+ "into reclass. Check documentation for more details")
+ }
+ common.infoMsg('Refreshing pillars on service node')
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ // default node is cid01 (preferably) or cfg01
+ default_node=salt.getPillar(saltMaster, 'I@salt:master', '_param:cicd_control_node01_hostname')['return'][0].values()[0] ?: 'cfg01'
+ // fetch tempest_test_target from runtest.yaml, otherwise fallback to default_node
+ tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: default_node+'*'
+ // TARGET_NODE will always override any settings above
+ TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
+ // default is /root/test/
+ runtest_tempest_cfg_dir = (env.runtest_tempest_cfg_dir) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0]
+ // default is tempest_generated.conf
+ runtest_tempest_cfg_name = (env.runtest_tempest_cfg_name) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_name')['return'][0].values()[0]
+ common.infoMsg("runtest_tempest_cfg is ${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}")
+ }
+ stage('Preparing resources') {
+ if ( PREPARE_RESOURCES.toBoolean() ) {
+ common.infoMsg('Running salt.minion state on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
+ common.infoMsg('Running keystone.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running glance.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
+ common.infoMsg('Running nova.client on service node')
+ salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
+ }
+ else {
+ common.infoMsg('Skipping resources preparation')
+ }
+ }
+ stage('Generate config') {
+ if ( GENERATE_CONFIG.toBoolean() ) {
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${runtest_tempest_cfg_dir}"])
+ salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${runtest_tempest_cfg_dir}"])
+ fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("TARGET_NODE is defined in Jenkins")
+ def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
+ common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
- null, null, ['name': fullnodename, 'classes': classes_to_add])
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
salt.checkResult(result)
}
- common.infoMsg('Refreshing pillars on service node')
- salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
- tempest_node=salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_test_target')['return'][0].values()[0] ?: 'I@gerrit:client'
- }
- stage('Preparing resources') {
- if ( PREPARE_RESOURCES.toBoolean() ) {
- common.infoMsg('Running salt.minion state on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, ['salt.minion'], VERBOSE, STOP_ON_ERROR, null, false, 300, 2, true, [], 60)
- common.infoMsg('Running keystone.client on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, 'keystone.client', VERBOSE, STOP_ON_ERROR)
- common.infoMsg('Running glance.client on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, 'glance.client', VERBOSE, STOP_ON_ERROR)
- common.infoMsg('Running nova.client on service node')
- salt.enforceState(saltMaster, SERVICE_NODE, 'nova.client', VERBOSE, STOP_ON_ERROR)
- }
- else {
- common.infoMsg('Skipping resources preparation')
- }
- }
- stage('Generate config') {
- if ( GENERATE_CONFIG.toBoolean() ) {
- salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.remove', ["${remote_artifacts_dir}"])
- salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
- fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
- TARGET_NODE = (env.TARGET_NODE) ?: tempest_node
- if (TARGET_NODE != tempest_node) {
- common.infoMsg("TARGET_NODE is defined in Jenkins")
- def params_to_update = ['tempest_test_target': "${TARGET_NODE}"]
- common.infoMsg("Overriding default ${tempest_node} value of tempest_test_target parameter")
- result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
- null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${TARGET_NODE}"]])
- salt.checkResult(result)
- }
- common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
- salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
- salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ common.infoMsg("TARGET_NODE is ${TARGET_NODE}")
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.remove', ["${remote_artifacts_dir}"])
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'file.mkdir', ["${remote_artifacts_dir}"])
+ // runtest state hangs if tempest_test_target is cfg01*
+ // let's run runtest.generate_tempest_config only for this case
+ if (TARGET_NODE == 'cfg01*') {
+ common.warningMsg("It is not recommended to run Tempest container on cfg node, but.. proceeding")
+ salt.enforceState(saltMaster, SERVICE_NODE, 'runtest.generate_tempest_config', VERBOSE, STOP_ON_ERROR)
+ } else {
salt.enforceState(saltMaster, SERVICE_NODE, 'runtest', VERBOSE, STOP_ON_ERROR)
- // we need to refresh pillars on target node after runtest state
- salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
- if (TARGET_NODE != tempest_node) {
- common.infoMsg("Reverting tempest_test_target parameter")
- result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
- null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
- }
- SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
- runtest_tempest_cfg_dir = salt.getPillar(saltMaster, SERVICE_NODE, '_param:runtest_tempest_cfg_dir')['return'][0].values()[0] ?: '/root/test/'
- if (SKIP_LIST_PATH) {
- salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
- args += ' --blacklist-file /root/tempest/skip.list '
- }
}
- else {
- common.infoMsg('Skipping Tempest config generation')
- salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+ // we need to refresh pillars on target node after runtest state
+ salt.runSaltProcessStep(saltMaster, TARGET_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
+ if (TARGET_NODE != tempest_node) {
+ common.infoMsg("Reverting tempest_test_target parameter")
+ result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
+ null, null, ['name': fullnodename, 'parameters': ['tempest_test_target': "${tempest_node}"]])
+ }
+ SKIP_LIST_PATH = (env.SKIP_LIST_PATH) ?: salt.getPillar(saltMaster, SERVICE_NODE, '_param:tempest_skip_list_path')['return'][0].values()[0]
+ if (SKIP_LIST_PATH) {
+ mounts = ["${runtest_tempest_cfg_dir}/skip.list": "/var/lib/tempest/skiplists/skip.list"]
+ salt.cmdRun(saltMaster, SERVICE_NODE, "salt-cp ${TARGET_NODE} ${SKIP_LIST_PATH} ${runtest_tempest_cfg_dir}/skip.list")
}
}
+ else {
+ common.infoMsg('Skipping Tempest config generation')
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}/reports")
+ }
+ }
+ try{
stage('Run Tempest tests') {
- mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
+ mounts = mounts + ["${runtest_tempest_cfg_dir}/${runtest_tempest_cfg_name}": "/etc/tempest/tempest.conf"]
validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
- mounts: mounts)
+ mounts: mounts, name: container_name)
report_prefix += 'tempest_'
if (env.concurrency) {
args += ' -w ' + env.concurrency
@@ -141,10 +154,10 @@
else {
if (TEMPEST_TEST_PATTERN != 'set=full') {
args += " -r ${TEMPEST_TEST_PATTERN} "
- report_prefix += 'full'
+ report_prefix += 'custom'
}
}
- salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
}
stage('Collect results') {
report_prefix += "_report_${env.BUILD_NUMBER}"
@@ -156,13 +169,9 @@
archiveArtifacts artifacts: "${report_prefix}.*"
junit "${report_prefix}.xml"
}
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- throw e
} finally {
- if (DEBUG_MODE == 'false') {
- validate.runCleanup(saltMaster, TARGET_NODE)
+ if ( ! DEBUG_MODE.toBoolean() ) {
+ validate.runCleanup(saltMaster, TARGET_NODE, container_name)
}
}
}
diff --git a/deploy-virtual-edge-mom.groovy b/deploy-virtual-edge-mom.groovy
index 875195b..8d35d37 100644
--- a/deploy-virtual-edge-mom.groovy
+++ b/deploy-virtual-edge-mom.groovy
@@ -171,7 +171,7 @@
saltMasterURL = "http://${edgeBuildsInfra[ed_].description.tokenize(' ')[1]}:6969"
- enableSyndic(saltMasterURL, 'cfg01*', SALT_MASTER_CREDENTIALS, salt_mom_ip)
+ enableSyndic(saltMasterURL, 'I@salt:master', SALT_MASTER_CREDENTIALS, salt_mom_ip)
props_ = edge_deploy_schemas[ed_]['properties']
deploy_job = edge_deploy_schemas[ed_]['deploy_job_name']
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index dca2462..7ec1092 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -71,7 +71,7 @@
try {
allowedGroups = ['release-engineering']
releaseTags = ['proposed', 'release', '2018', '2019', '2020']
- tags = [env.SOURCE_IMAGE_TAG, env.IMAGE_TAG]
+ tags = [env.IMAGE_TAG]
tagInRelease = tags.any { tag -> releaseTags.any { tag.contains(it) } }
if (tagInRelease) {
if (!jenkinsUtils.currentUserInGroups(allowedGroups)) {
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index 054c0cc..0962f88 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -20,6 +20,7 @@
def restoreType = env.RESTORE_TYPE
def runRestoreDb = false
def runBackupDb = false
+def restartCluster = false
askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
checkTimeSync = (env.getProperty('CHECK_TIME_SYNC') ?: true).toBoolean()
@@ -34,90 +35,131 @@
if (restoreType.equals("BACKUP_AND_RESTORE")) {
runBackupDb = true
}
+if (restoreType.equals("RESTART_CLUSTER")) {
+ restartCluster = true
+}
timeout(time: 12, unit: 'HOURS') {
node() {
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+
+ def galeraStatus = [:]
stage('Verify status') {
- resultCode = galera.verifyGaleraStatus(pepperEnv, false, checkTimeSync)
- if (resultCode == 128) {
- common.errorMsg("Unable to connect to Galera Master. Trying slaves...")
- resultCode = galera.verifyGaleraStatus(pepperEnv, true, checkTimeSync)
- if (resultCode == 129) {
- common.errorMsg("Unable to obtain Galera slave minions list". "Without fixing this issue, pipeline cannot continue in verification, backup and restoration.")
+ def sysstatTargets = 'I@xtrabackup:client or I@xtrabackup:server'
+ def sysstatTargetsNodes = salt.getMinions(pepperEnv, sysstatTargets)
+ try {
+ if (!salt.isPackageInstalled(['saltId': pepperEnv, 'target': sysstatTargets, 'packageName': 'sysstat', 'output': false])) {
+ if (askConfirmation) {
+ input message: "Do you want to install 'sysstat' package on targeted nodes: ${sysstatTargetsNodes}? Click to confirm"
+ }
+ salt.runSaltProcessStep(pepperEnv, sysstatTargets, 'pkg.install', ['sysstat'])
+ }
+ } catch (Exception e) {
+ common.errorMsg("Unable to determine status of sysstat package on target nodes: ${sysstatTargetsNodes}.")
+ common.errorMsg(e.getMessage())
+ if (askConfirmation) {
+ input message: "Do you want to continue? Click to confirm"
+ }
+ }
+ galeraStatus = galera.verifyGaleraStatus(pepperEnv, checkTimeSync)
+
+ switch (galeraStatus.error) {
+ case 128:
+ common.errorMsg("Unable to obtain Galera members minions list. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. This may be caused by wrong Galera configuration or corrupted pillar data.")
currentBuild.result = "FAILURE"
return
- } else if (resultCode == 130) {
- common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification, backup and restoration.")
+ case 130:
+ common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification, backup and restoration. Is at least one member of the Galera cluster up and running?")
currentBuild.result = "FAILURE"
return
- }
- }
- if (resultCode == 131) {
- common.errorMsg("Time desynced - Please fix this issue and rerun the pipeline.")
- currentBuild.result = "FAILURE"
- return
- }
- if (resultCode == 140 || resultCode == 141) {
- common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
- currentBuild.result = "FAILURE"
- return
- }
- if (resultCode == 1) {
- if(askConfirmation){
- common.warningMsg("There was a problem with parsing the status output or with determining it. Do you want to run a restore?")
- } else {
- common.warningMsg("There was a problem with parsing the status output or with determining it. Try to restore.")
- }
- } else if (resultCode > 1) {
- if(askConfirmation){
- common.warningMsg("There's something wrong with the cluster, do you want to continue with backup and/or restore?")
- } else {
- common.warningMsg("There's something wrong with the cluster, try to backup and/or restore.")
- }
- } else {
- if(askConfirmation){
- common.warningMsg("There seems to be everything alright with the cluster, do you still want to continue with backup and/or restore?")
- } else {
- common.warningMsg("There seems to be everything alright with the cluster, no backup and no restoration will be done.")
- currentBuild.result = "SUCCESS"
- return
- }
+ case 131:
+ common.errorMsg("Time desynced - Please fix this issue and rerun the pipeline.")
+ currentBuild.result = "FAILURE"
+ return
+ case 140..141:
+ common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
+ currentBuild.result = "FAILURE"
+ return
+ case 1:
+ if (askConfirmation) {
+ input message: "There was a problem with parsing the status output or with determining it. Do you want to run a next action: ${restoreType}?"
+ } else {
+ common.warningMsg("There was a problem with parsing the status output or with determining it. Trying to perform action: ${restoreType}.")
+ }
+ break
+ case 0:
+ if (askConfirmation) {
+ input message: "There seems to be everything alright with the cluster, do you still want to continue with next action: ${restoreType}?"
+ break
+ } else {
+ common.warningMsg("There seems to be everything alright with the cluster, no backup and no restoration will be done.")
+ currentBuild.result = "SUCCESS"
+ return
+ }
+ default:
+ if (askConfirmation) {
+ input message: "There's something wrong with the cluster, do you want to continue with action: ${restoreType}?"
+ } else {
+ common.warningMsg("There's something wrong with the cluster, trying to perform action: ${restoreType}")
+ }
+ break
}
}
if (runBackupDb) {
+ if (askConfirmation) {
+ input message: "Are you sure you want to run a backup? Click to confirm"
+ }
stage('Backup') {
- common.infoMsg("Running backup job.")
- deployBuild = build( job: "galera-database-backup-pipeline", parameters: [
- [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
- [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'OVERRIDE_BACKUP_NODE', value: "none"],
- ]
+ deployBuild = build(job: 'galera_backup_database', parameters: [
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+ [$class: 'StringParameterValue', name: 'OVERRIDE_BACKUP_NODE', value: "none"],
+ ]
)
}
}
- stage('Restore') {
- if(askConfirmation){
- input message: "Are you sure you want to run a restore? Click to confirm"
- }
- try {
- if((!askConfirmation && resultCode > 0) || askConfirmation){
- galera.restoreGaleraCluster(pepperEnv, runRestoreDb)
+ if (runRestoreDb || restartCluster) {
+ if (runRestoreDb) {
+ stage('Restore') {
+ if (askConfirmation) {
+ input message: "Are you sure you want to run a restore? Click to confirm"
+ }
+ try {
+ if ((!askConfirmation && galeraStatus.error > 0) || askConfirmation) {
+ galera.restoreGaleraCluster(pepperEnv, galeraStatus)
+ }
+ } catch (Exception e) {
+ common.errorMsg("Restoration process has failed.")
+ common.errorMsg(e.getMessage())
+ }
}
- } catch (Exception e) {
- common.errorMsg("Restoration process has failed.")
}
- }
- stage('Verify restoration result') {
- common.retry(verificationRetries, 15) {
- exitCode = galera.verifyGaleraStatus(pepperEnv, false, false)
- if (exitCode >= 1) {
- error("Verification attempt finished with an error. This may be caused by cluster not having enough time to come up or to sync. Next verification attempt in 15 seconds.")
- } else {
- common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
- currentBuild.result = "SUCCESS"
+ if (restartCluster) {
+ stage('Restart cluster') {
+ if (askConfirmation) {
+ input message: "Are you sure you want to run a restart? Click to confirm"
+ }
+ try {
+ if ((!askConfirmation && galeraStatus.error > 0) || askConfirmation) {
+ galera.restoreGaleraCluster(pepperEnv, galeraStatus, false)
+ }
+ } catch (Exception e) {
+ common.errorMsg("Restart process has failed.")
+ common.errorMsg(e.getMessage())
+ }
+ }
+ }
+ stage('Verify restoration result') {
+ common.retry(verificationRetries, 15) {
+ def status = galera.verifyGaleraStatus(pepperEnv, false)
+ if (status.error >= 1) {
+ error("Verification attempt finished with an error. This may be caused by cluster not having enough time to come up or to sync. Next verification attempt in 15 seconds.")
+ } else {
+ common.infoMsg("Restoration procedure seems to be successful. See verification report to be sure.")
+ currentBuild.result = "SUCCESS"
+ }
}
}
}
diff --git a/galera-database-backup-pipeline.groovy b/galera-database-backup-pipeline.groovy
index a6d0af5..8239aa6 100644
--- a/galera-database-backup-pipeline.groovy
+++ b/galera-database-backup-pipeline.groovy
@@ -57,5 +57,19 @@
stage('Clean-up') {
salt.cmdRun(pepperEnv, backupNode, "su root -c '/usr/local/bin/innobackupex-runner.sh -c'")
}
+ stage('Backup Dogtag') {
+ if (!salt.getMinions(pepperEnv, "I@dogtag:server:enabled").isEmpty()) {
+ dogtagBackupBuild = build(job: 'backupninja_backup', parameters: [
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+ [$class: 'BooleanParameterValue', name: 'ASK_CONFIRMATION', value: false],
+ [$class: 'BooleanParameterValue', name: 'BACKUP_SALTMASTER_AND_MAAS', value: false],
+ [$class: 'BooleanParameterValue', name: 'BACKUP_DOGTAG', value: true],
+ ]
+ )
+ } else {
+ common.warningMsg("Dogtag pillar not found. This is fine if you are using different Barbican backend.")
+ }
+ }
}
}
\ No newline at end of file
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 324b0e2..3313d48 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -43,20 +43,18 @@
def skipProjectsVerify = ['mk/docker-jnlp-slave']
stage("test") {
+ //notification about Start job
+ ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s -m \"'Build Started %s'\"", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, BUILD_URL))
//check Code-Review
- if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
- continue
- } else {
- common.errorMsg("Change don't have a CodeReview, skipping gate")
- throw new Exception ("Change don't have a CodeReview, skipping gate")
+ if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+ throw new Exception('Change don\'t have a CodeReview+1, reject gate')
}
//check Verify
if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
- common.errorMsg("Change don't have true Verify, skipping gate")
- throw new Exception ("Change don't have true Verify, skipping gate")
+ throw new Exception('Change don\'t have initial Verify+1, reject gate')
} else if (gerritChange.status != "MERGED" && !env.SKIP_TEST.toBoolean()) {
//Verify-label off
- ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review --verified 0", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s --verified 0", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
//Do stage (test)
doSubmit = true
def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
@@ -79,7 +77,7 @@
if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
callJobWithExtraVars('test-salt-model-ci-wrapper')
} else {
- if (isJobExists(testJob)) {
+ if (isJobExists(testJob)) {
common.infoMsg("Test job ${testJob} found, running")
def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
build job: testJob, parameters: [
@@ -88,14 +86,14 @@
]
giveVerify = true
} else {
- common.infoMsg("Test job ${testJob} not found")
+ common.infoMsg("Test job ${testJob} not found")
+ }
}
}
+ } else {
+ common.infoMsg('Test job skipped')
}
- } else {
- common.infoMsg("Test job skipped")
}
- }
stage("submit review") {
if (gerritChange.status == "MERGED") {
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 261193e..d8bfe3a 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -23,6 +23,50 @@
distribRevision = 'proposed'
gitGuessedVersion = false
+def GenerateModelToxDocker(Map params) {
+ def ccRoot = params['ccRoot']
+ def context = params['context']
+ def outDir = params['outDir']
+ def envOpts = params['envOpts']
+ // Unique temp file so concurrent builds in the same workspace do not clash
+ def tempContextFile = new File(ccRoot, 'tempContext.yaml_' + UUID.randomUUID().toString()).toString()
+ writeFile file: tempContextFile, text: context
+ // Get Jenkins user UID and GID
+ def jenkinsUID = sh(script: 'id -u', returnStdout: true).trim()
+ def jenkinsGID = sh(script: 'id -g', returnStdout: true).trim()
+ /*
+ by default, process in image operates via root user
+ Otherwise, gpg key for model and all files managed by jenkins user
+ To make it compatible, install requirements from root user, but generate model via jenkins
+ for build use upstream Ubuntu Bionic image
+ */
+ def configRun = ['distribRevision': 'nightly',
+ 'envOpts' : envOpts + ["CONFIG_FILE=$tempContextFile",
+ "OUTPUT_DIR=${outDir}"
+ ],
+ 'image': 'docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave',
+ 'runCommands' : [
+ '001_prepare_generate_auto_reqs': {
+ sh('''
+ pip install tox
+ ''')
+ },
+ // user & group can be different on host and in docker
+ '002_set_jenkins_id': {
+ sh("""
+ usermod -u ${jenkinsUID} jenkins
+ groupmod -g ${jenkinsGID} jenkins
+ """)
+ },
+ '003_run_generate_auto': {
+ print('[Cookiecutter build] Result:\n' +
+ sh(returnStdout: true, script: 'cd ' + ccRoot + '; su jenkins -c "tox -ve generate_auto" '))
+ }
+ ]
+ ]
+
+ saltModelTesting.setupDockerAndTest(configRun)
+}
+
def globalVariatorsUpdate() {
def templateContext = readYaml text: env.COOKIECUTTER_TEMPLATE_CONTEXT
def context = templateContext['default_context']
@@ -31,34 +75,19 @@
// because each of them, might be 'refs/' variable, we need to add some tricky trigger of using
// 'release/XXX' logic. This is totall guess - so,if even those one failed, to definitely must pass
// correct variable finally!
- [context.get('cookiecutter_template_branch'), context.get('shared_reclass_branch'), context.get('mcp_common_scripts_branch')].any { branch ->
+ [ context.get('cookiecutter_template_branch'), context.get('shared_reclass_branch'), context.get('mcp_common_scripts_branch') ].any { branch ->
if (branch.toString().startsWith('release/')) {
gitGuessedVersion = branch
return true
}
}
- // Use mcpVersion git tag if not specified branch for cookiecutter-templates
- if (!context.get('cookiecutter_template_branch')) {
- context['cookiecutter_template_branch'] = gitGuessedVersion ?: context['mcp_version']
- }
- // Don't have n/t/s for cookiecutter-templates repo, therefore use master
- if (["nightly", "testing", "stable"].contains(context['cookiecutter_template_branch'])) {
- context['cookiecutter_template_branch'] = 'master'
- }
- if (!context.get('shared_reclass_branch')) {
- context['shared_reclass_branch'] = gitGuessedVersion ?: context['mcp_version']
- }
- // Don't have nightly/testing for reclass-system repo, therefore use master
- if (["nightly", "testing", "stable"].contains(context['shared_reclass_branch'])) {
- context['shared_reclass_branch'] = 'master'
- }
- if (!context.get('mcp_common_scripts_branch')) {
- // Pin exactly to CC branch, since it might use 'release/XXX' format
- context['mcp_common_scripts_branch'] = gitGuessedVersion ?: context['mcp_version']
- }
- // Don't have n/t/s for mcp-common-scripts repo, therefore use master
- if (["nightly", "testing", "stable"].contains(context['mcp_common_scripts_branch'])) {
- context['mcp_common_scripts_branch'] = 'master'
+
+ [ 'cookiecutter_template_branch', 'shared_reclass_branch', 'mcp_common_scripts_branch' ].each { repoName ->
+ if (context['mcp_version'] in [ "nightly", "testing", "stable" ] && ! context.get(repoName)) {
+ context[repoName] = 'master'
+ } else if (! context.get(repoName)) {
+ context[repoName] = gitGuessedVersion ?: "release/${context['mcp_version']}".toString()
+ }
}
//
distribRevision = context['mcp_version']
@@ -86,6 +115,16 @@
updateSaltFormulasDuringTest = false
}
+ if (gitGuessedVersion == 'release/proposed/2019.2.0') {
+ def mcpSaltRepoUpdateVar = 'deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main'
+ if (context.get('offline_deployment', 'False').toBoolean()) {
+ mcpSaltRepoUpdateVar = "deb [arch=amd64] http://${context.get('aptly_server_deploy_address')}/update/proposed/salt-formulas/xenial xenial main".toString()
+ }
+ // CFG node in 2019.2.X update has to be bootstrapped with update/proposed repository for salt formulas
+ context['cloudinit_master_config'] = context.get('cloudinit_master_config', false) ?: [:]
+ context['cloudinit_master_config']['MCP_SALT_REPO_UPDATES'] = context['cloudinit_master_config'].get('MCP_SALT_REPO_UPDATES', false) ?: mcpSaltRepoUpdateVar
+ }
+
common.infoMsg("Using context:\n" + context)
print prettyPrint(toJson(context))
return context
@@ -141,7 +180,8 @@
stage('Generate model') {
// GNUPGHOME environment variable is required for all gpg commands
// and for python.generateModel execution
- withEnv(["GNUPGHOME=${env.WORKSPACE}/gpghome"]) {
+ def envOpts = ["GNUPGHOME=${env.WORKSPACE}/gpghome"]
+ withEnv(envOpts) {
if (context['secrets_encryption_enabled'] == 'True') {
sh "mkdir gpghome; chmod 700 gpghome"
def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
@@ -183,7 +223,10 @@
// still expect only lower lvl of project, aka model/classes/cluster/XXX/. So,lets dump result into
// temp dir, and then copy it over initial structure.
reclassTempRootDir = sh(script: "mktemp -d -p ${env.WORKSPACE}", returnStdout: true).trim()
- python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, reclassTempRootDir, templateEnv, false)
+ GenerateModelToxDocker(['context': common2.dumpYAML(['default_context': context]),
+ 'ccRoot' : templateEnv,
+ 'outDir' : reclassTempRootDir,
+ 'envOpts': envOpts])
dir(modelEnv) {
common.warningMsg('Forming reclass-root structure...')
sh("cp -ra ${reclassTempRootDir}/reclass/* .")
@@ -268,6 +311,11 @@
def smc = [:]
smc['SALT_MASTER_MINION_ID'] = "${context['salt_master_hostname']}.${context['cluster_domain']}"
smc['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+ if (context.get('cloudinit_master_config', false)) {
+ context['cloudinit_master_config'].each { k, v ->
+ smc[k] = v
+ }
+ }
if (outdateGeneration) {
smc['DEPLOY_NETWORK_GW'] = context['deploy_network_gateway']
smc['DEPLOY_NETWORK_NETMASK'] = context['deploy_network_netmask']
@@ -302,7 +350,7 @@
}
for (i in common.entries(smc)) {
- sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+ sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\${${i[0]}:-\"${i[1]}\"},' user_data"
}
// calculate netmask
@@ -371,10 +419,31 @@
archiveArtifacts artifacts: "${context['cluster_name']}.tar.gz"
if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
- emailext(to: RequesterEmail,
- attachmentsPattern: "output-${context['cluster_name']}/*",
- body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
- subject: "Your Salt model ${context['cluster_name']}")
+ def mailSubject = "Your Salt model ${context['cluster_name']}"
+ if (context.get('send_method') == 'gcs') {
+ def gcs = new com.mirantis.mk.GoogleCloudStorage()
+ def uploadIsos = [ "output-${context['cluster_name']}/${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso" ]
+ if (context['local_repositories'] == 'True') {
+ uploadIsos << "output-${context['cluster_name']}/${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+ }
+ // generate random hash to have uniq and unpredictable link to file
+ def randHash = common.generateRandomHashString(64)
+ def config = [
+ 'creds': context['gcs_creds'],
+ 'project': context['gcs_project'],
+ 'dest': "gs://${context['gcs_bucket']}/${randHash}",
+ 'sources': uploadIsos
+ ]
+ def fileURLs = gcs.uploadArtifactToGoogleStorageBucket(config).join(' ').replace('gs://', 'https://storage.googleapis.com/')
+ emailext(to: RequesterEmail,
+ body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and available to download via next URL: ${fileURLs} within 7 days.\nEnjoy!\n\nMirantis",
+ subject: mailSubject)
+ } else {
+ emailext(to: RequesterEmail,
+ attachmentsPattern: "output-${context['cluster_name']}/*",
+ body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+ subject: mailSubject)
+ }
}
dir("output-${context['cluster_name']}") {
deleteDir()
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index fa49bbc..6f14866 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -5,6 +5,22 @@
timeout(time: 12, unit: 'HOURS') {
node() {
try {
+ def sourceCreds = env.SOURCE_CREDENTIALS
+ if (sourceCreds && common.getCredentialsById(sourceCreds, 'password')) {
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : sourceCreds,
+ passwordVariable: 'GIT_PASS',
+ usernameVariable: 'GIT_USER']
+ ]) {
+ sh """
+ set +x
+ git config --global credential.${SOURCE_URL}.username \${GIT_USER}
+ echo "echo \${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
+ """
+ env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
+ }
+ }
if (BRANCHES == '*' || BRANCHES.contains('*')) {
branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
} else {
@@ -18,7 +34,8 @@
dir('source') {
checkout changelog: true, poll: true,
scm: [$class : 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
- extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+ extensions: [[$class: 'CleanCheckout']], submoduleCfg: [],
+ userRemoteConfigs: [[credentialsId: sourceCreds, url: SOURCE_URL]]]
git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, branches, true)
}
} catch (Throwable e) {
@@ -26,6 +43,9 @@
currentBuild.result = 'FAILURE'
currentBuild.description = currentBuild.description ? e.message + '' + currentBuild.description : e.message
throw e
+ } finally {
+ sh "git config --global --unset credential.${SOURCE_URL}.username || true"
+ deleteDir()
}
}
}
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index a358222..7c761a0 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -66,7 +66,7 @@
stage('Opencontrail controllers upgrade') {
- oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
@@ -103,22 +103,14 @@
args = 'apt install contrail-database -y;'
check = 'nodetool status'
- // ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
- // ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
- // ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
check = 'contrail-status'
- // ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
- // ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
- // ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
try {
salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
@@ -144,7 +136,7 @@
stage('Opencontrail analytics upgrade') {
- oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
@@ -161,22 +153,14 @@
args = 'apt install contrail-database -y;'
check = 'nodetool status'
- // nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
- // nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
- // nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
check = 'contrail-status'
- // nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
- // nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
- // nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
try {
salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
@@ -302,7 +286,7 @@
stage('Opencontrail controllers rollback') {
- oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
@@ -318,22 +302,14 @@
args = 'apt install contrail-database -y --force-yes;'
check = 'nodetool status'
- // ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
- // ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
- // ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
check = 'contrail-status'
- // ntw01
- runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
- // ntw02
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
- // ntw03
- runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
try {
salt.enforceState(pepperEnv, 'I@opencontrail:control', 'opencontrail')
@@ -361,7 +337,7 @@
stage('Opencontrail analytics rollback') {
- oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector:role:primary', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
oc_component_repo = oc_component_repo['return'][0].values()[0]
try {
@@ -377,22 +353,14 @@
args = 'apt install contrail-database -y --force-yes;'
check = 'nodetool status'
- // nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
- // nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
- // nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
check = 'contrail-status'
- // nal01
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
- // nal02
- runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, pepperEnv, common)
- // nal03
- runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:secondary', command, args, check, salt, pepperEnv, common)
try {
salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'opencontrail')
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 581168a..780beac 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -5,6 +5,7 @@
* SALT_MASTER_CREDENTIALS Credentials to the Salt API.
* SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
* TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ * BATCH_SIZE Use batching for a large number of target nodes
*
**/
@@ -18,6 +19,11 @@
def command
def commandKwargs
+def batch_size = ''
+if (common.validInputParam('BATCH_SIZE')) {
+ batch_size = "${BATCH_SIZE}"
+}
+
timeout(time: 12, unit: 'HOURS') {
node() {
try {
@@ -43,76 +49,76 @@
common.infoMsg("First node %nodename% has trusty")
common.infoMsg("Assuming trusty on all cluster, running extra network states...")
common.infoMsg("Network iteration #1. Bonding")
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true, true, batch_size)
common.infoMsg("Network iteration #2. Vlan tagging and bridging")
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.network', true, true, batch_size)
}
}
stage("Setup repositories") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo', true, true, batch_size)
}
stage("Upgrade packages") {
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.upgrade', [], batch_size, true)
}
stage("Update Hosts file") {
- salt.enforceState(pepperEnv, "I@linux:system", 'linux.network.host', true)
+ salt.enforceState(pepperEnv, "I@linux:system", 'linux.network.host', true, true, batch_size)
}
stage("Setup networking") {
// Sync all of the modules from the salt master.
- salt.syncAll(pepperEnv, targetLiveAll)
+ salt.syncAll(pepperEnv, targetLiveAll, batch_size)
// Apply state 'salt' to install python-psutil for network configuration without restarting salt-minion to avoid losing connection.
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply', ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply', ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], batch_size, true)
// Restart salt-minion to take effect.
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['salt-minion'], batch_size, true, 10)
// Configure networking excluding vhost0 interface.
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply', ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.apply', ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], batch_size, true)
// Kill unnecessary processes ifup/ifdown which is stuck from previous state linux.network.
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifup'], batch_size, false)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'ps.pkill', ['ifdown'], batch_size, false)
// Restart networking to bring UP all interfaces.
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['networking'], batch_size, true, 300)
}
stage("Highstate compute") {
// Execute highstate without state opencontrail.client.
common.retry(2){
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], batch_size, true)
}
// Apply nova state to remove libvirt default bridge virbr0.
- salt.enforceState(pepperEnv, targetLiveAll, 'nova', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'nova', true, true, batch_size)
// Execute highstate.
- salt.enforceHighstate(pepperEnv, targetLiveAll, true)
+ salt.enforceHighstate(pepperEnv, targetLiveAll, true, true, batch_size)
// Restart supervisor-vrouter.
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'service.restart', ['supervisor-vrouter'], batch_size, true, 300)
// Apply salt and collectd if is present to update information about current network interfaces.
- salt.enforceState(pepperEnv, targetLiveAll, 'salt', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'salt', true, true, batch_size)
if(!salt.getPillar(pepperEnv, minions[0], "collectd")['return'][0].values()[0].isEmpty()) {
- salt.enforceState(pepperEnv, targetLiveAll, 'collectd', true)
+ salt.enforceState(pepperEnv, targetLiveAll, 'collectd', true, true, batch_size)
}
}
stage("Update/Install monitoring") {
//Collect Grains
- salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains')
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules')
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update')
+ salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains', true, true, batch_size)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules', [], batch_size)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update', [], batch_size)
sleep(5)
- salt.enforceState(pepperEnv, targetLiveAll, 'prometheus')
- salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+ salt.enforceState(pepperEnv, targetLiveAll, 'prometheus', true, true, batch_size)
+ salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus', true, true, batch_size)
}
} catch (Throwable e) {
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index f2dd78c..5929390 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -32,6 +32,8 @@
* No service downtime
* No workload downtime''',
'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
* Verify API, perform basic CRUD operations for services.
* Verify that compute/neutron agents on hosts are up.
* Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -153,6 +155,8 @@
for (target in upgradeTargets){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, 'linux.system.repo')
openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
}
@@ -173,6 +177,9 @@
if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
debian.osUpgradeNode(env, target, upgrade_mode, false)
}
+ // Workaround for PROD-31413, install python-tornado from latest release if available and
+ // restart minion to apply new code.
+ salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
}
common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index 7458a27..e768564 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -31,6 +31,8 @@
* No service downtime
* No workload downtime''',
'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
* Verify API, perform basic CRUD operations for services.
* Verify that compute/neutron agents on hosts are up.
* Run some service built in checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -138,6 +140,8 @@
for (target in targetNodes){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, 'linux.system.repo')
openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
@@ -158,6 +162,9 @@
if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
debian.osUpgradeNode(env, target, upgrade_mode, false)
}
+ // Workaround for PROD-31413, install python-tornado from latest release if available and
+ // restart minion to apply new code.
+ salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
}
common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/openstack-galera-upgrade.groovy b/openstack-galera-upgrade.groovy
new file mode 100644
index 0000000..f124051
--- /dev/null
+++ b/openstack-galera-upgrade.groovy
@@ -0,0 +1,206 @@
+/**
+ * Upgrade MySQL and Galera packages on dbs nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.15:6969].
+ * SHUTDOWN_CLUSTER Shutdown all mysql instances on target nodes at the same time.
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade).
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ * TARGET_SERVERS Comma separated list of salt compound definitions to upgrade.
+ * INTERACTIVE Ask interactive questions during pipeline run (bool).
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
+def galera = new com.mirantis.mk.Galera()
+def shutdownCluster = SHUTDOWN_CLUSTER.toBoolean()
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+ 'Description': 'Only non destructive actions will be applied during this phase. Basic service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify MySQL is running and Galera cluster is operational.''',
+ 'State result': 'Basic checks around wsrep Galera status are passed.'
+ ])
+
+upgradeStageMap.put('Stop MySQL service',
+ [
+ 'Description': 'All MySQL services will be stopped on All TARGET_SERVERS nodes.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * MySQL services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Stop MySQL services''',
+ 'State result': 'MySQL service is stopped',
+ ])
+
+upgradeStageMap.put('Upgrade OS',
+ [
+ 'Description': 'Optional step. OS packages will be upgraded during this phase, depending on the job parameters dist-upgrade might be called. And reboot of node executed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade new kernel might be installed and node rebooted
+ * System packages are updated
+ * Node might be rebooted
+'''
+ ])
+
+upgradeStageMap.put('Upgrade MySQL server',
+ [
+ 'Description': 'MySQL and Galera packages will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services loose connection to MySQL server
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of MySQL and Galera packages
+ * Render version of configs''',
+ 'State result': '''
+ * MySQL packages are upgraded''',
+ ])
+
+upgradeStageMap.put('Start MySQL service',
+ [
+ 'Description': 'All MySQL services will be running on All TARGET_SERVERS nodes.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * MySQL service is running.
+ * OpenStack API are accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Start MySQL service''',
+ 'State result': 'MySQL service is running',
+ ])
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+ if (upgradeTargets.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
+ }
+
+ def targetSecMapping = [:]
+ def secNoList = []
+ def out
+ def stopTargets = upgradeTargets.reverse()
+ common.printStageMap(upgradeStageMap)
+
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+ }
+
+ for (target in upgradeTargets) {
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, ['linux.system.repo'])
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ }
+
+ if (shutdownCluster){
+ for (target in stopTargets) {
+ common.stageWrapper(upgradeStageMap, "Stop MySQL service", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ }
+ }
+ }
+
+ for (target in upgradeTargets) {
+ out = salt.cmdRun(env, target, 'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ common.infoMsg("Get seqno: ${out} for node ${target}")
+ if (!out.isNumber()){
+ out = -2
+ }
+ targetSecMapping[out.toInteger()] = target
+ secNoList.add(out.toInteger())
+ }
+
+ def masterNode = targetSecMapping[secNoList.max()]
+ common.infoMsg("Master node is: ${masterNode}")
+
+ // Make sure we start upgrade always from master node
+ upgradeTargets.remove(masterNode)
+ upgradeTargets = [masterNode] + upgradeTargets
+ common.infoMsg("Upgrade targets are: ${upgradeTargets}")
+
+ for (target in upgradeTargets) {
+
+ common.stageWrapper(upgradeStageMap, "Stop MySQL service", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
+ }
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade MySQL server", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+ openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+ }
+
+ if (shutdownCluster && target == masterNode){
+ //Start first node.
+ common.stageWrapper(upgradeStageMap, "Start MySQL service", target, interactive) {
+ galera.startFirstNode(env, target)
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Start MySQL service", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ }
+
+ // restart first node by applying state.
+
+ if (shutdownCluster) {
+ openstack.runOpenStackUpgradePhase(env, masterNode, 'render_config')
+ salt.cmdRun(env, masterNode, "service mysql reload")
+ openstack.runOpenStackUpgradePhase(env, masterNode, 'verify')
+ }
+
+ for (target in upgradeTargets) {
+ ensureClusterState = galera.getWsrepParameters(env, target, 'wsrep_evs_state')
+ if (ensureClusterState['wsrep_evs_state'] == 'OPERATIONAL') {
+ common.infoMsg('Node is in OPERATIONAL state.')
+ } else {
+ throw new Exception("Node is NOT in OPERATIONAL state.")
+ }
+ }
+ }
+}
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
index aabdafc..bc252da 100644
--- a/openstack-rabbitmq-upgrade.groovy
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -29,6 +29,8 @@
* No service downtime
* No workload downtime''',
'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
* Verify API, perform basic CRUD operations for services.
* Verify rabbitmq is running and operational.''',
'State result': 'Basic checks around services API are passed.'
@@ -114,6 +116,8 @@
for (target in upgradeTargets){
common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(env, target, 'linux.system.repo')
openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
}
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index b585e7e..f1964ab 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -7,11 +7,20 @@
*
**/
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
def pepperEnv = "pepperEnv"
+
+def getValueForPillarKey(pepperEnv, target, pillarKey) {
+ def out = salt.getReturnValues(salt.getPillar(pepperEnv, target, pillarKey))
+ if (out == '') {
+ throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
+ }
+ return out.toString()
+}
+
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -23,59 +32,86 @@
try {
salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
} catch (Exception er) {
- common.errorMsg("Opencontrail controllers health check stage found issues with services. Please take a look at the logs above.")
- throw er
+ common.errorMsg("Opencontrail controllers health check stage found issues with currently running services.")
}
}
- stage('Backup') {
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'bash /usr/local/bin/cassandra-backup-runner-call.sh')
- }
-
stage('Restore') {
+ // stop neutron-server to prevent CRUD api calls to contrail-api service
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('neutron-server service already stopped')
+ }
// get opencontrail version
- def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
- def contrailVersion = _pillar['return'][0].values()[0]
- common.infoMsg("Contrail version is ${contrailVersion}")
- if (contrailVersion >= 4) {
- common.infoMsg("There will be steps for OC4.0 restore")
+ def contrailVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+ def configDbIp = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:host")
+ def configDbPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
+ common.infoMsg("OpenContrail version is ${contrailVersion}")
+ if (contrailVersion.startsWith('4')) {
+ controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary",
+ "docker:client:compose:opencontrail:service:controller:container_name")
+ common.infoMsg("Applying db restore procedure for OpenContrail 4.X version")
try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+ common.infoMsg("Stop contrail control plane containers")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose down')
} catch (Exception err) {
- common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+ common.errorMsg('An error has been occurred during contrail containers shutdown: ' + err.getMessage())
+ throw err
}
try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+ common.infoMsg("Cleanup cassandra data")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'for f in $(ls /var/lib/configdb/); do rm -r /var/lib/configdb/$f; done')
} catch (Exception err) {
- common.warningMsg('cassandra data already removed? ' + err.getMessage())
+ common.errorMsg('Cannot cleanup cassandra data on control nodes: ' + err.getMessage())
+ throw err
}
try {
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+ common.infoMsg("Start cassandra db on I@cassandra:backup:client node")
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
} catch (Exception err) {
- common.warningMsg('contrail-database already started? ' + err.getMessage())
+ common.errorMsg('An error has been occurred during cassandra db startup on I@cassandra:backup:client node: ' + err.getMessage())
+ throw err
}
- // remove restore-already-happenned file if any is present
+ // wait for cassandra to be online
+ common.retry(6, 20){
+ common.infoMsg("Trying to connect to casandra db on I@cassandra:backup:client node ...")
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+ }
+ // remove restore-already-happened file if any is present
try {
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
} catch (Exception err) {
common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
}
- // perform actual backup
salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
- salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
- sleep(5)
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
- // the lovely wait-60-seconds mantra before restarting supervisor-database service
- sleep(60)
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
- // another mantra, wait till all services are up
- sleep(60)
- } else {
try {
- salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
- } catch (Exception er) {
- common.warningMsg('neutron-server service already stopped')
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+ } catch (Exception err) {
+ common.errorMsg('An error has been occurred during cassandra db startup on I@opencontrail:control and not I@cassandra:backup:client nodes: ' + err.getMessage())
+ throw err
}
+ // wait till outstanding cassandra dbs are up
+ common.retry(6, 20){
+ common.infoMsg("Trying to connect to casandra db on I@opencontrail:control and not I@cassandra:backup:client nodes ...")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+ }
+ try {
+ common.infoMsg("Start analytics containers node")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+ } catch (Exception err) {
+ common.errorMsg('An error has been occurred during analytics containers startup: ' + err.getMessage())
+ throw err
+ }
+ // contrail-control service needs to be restarted after db sync to re-initialize with recovered data
+ try {
+ common.infoMsg("Restart contrail-control services on control nodes")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller service contrail-control restart')
+ } catch (Exception err) {
+ common.errorMsg('An error has been occurred during contrail-control services restart: ' + err.getMessage())
+ throw err
+ }
+ } else {
try {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
} catch (Exception er) {
@@ -104,8 +140,7 @@
common.warningMsg('Directory already empty')
}
- _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
- def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+ def backupDir = getValueForPillarKey(pepperEnv, "I@cassandra:backup:client", "cassandra:backup:backup_dir")
common.infoMsg("Backup directory is ${backupDir}")
salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
@@ -127,7 +162,6 @@
sleep(5)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
- salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
// wait until contrail-status is up
salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
@@ -135,11 +169,12 @@
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
+
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
}
stage('Opencontrail controllers health check') {
- common.retry(3, 20){
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
+ common.retry(9, 20){
salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
}
}
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 85b93e9..7554530 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -49,27 +49,40 @@
def verify_es_is_green(master) {
common.infoMsg('Verify that the Elasticsearch cluster status is green')
try {
- def retries_wait = 20
- def retries = 15
+ def retries_wait = 120
+ def retries = 60
+
def elasticsearch_vip
- def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
- if(!pillar['return'].isEmpty()) {
- elasticsearch_vip = pillar['return'][0].values()[0]
+ def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
+ if(pillar) {
+ elasticsearch_vip = pillar
} else {
errorOccured = true
common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
}
- pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port'))
def elasticsearch_port
- if(!pillar['return'].isEmpty()) {
- elasticsearch_port = pillar['return'][0].values()[0]
+ if(pillar) {
+ elasticsearch_port = pillar
} else {
errorOccured = true
common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
}
+
+ pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:scheme'))
+ def elasticsearch_scheme
+ if(pillar) {
+ elasticsearch_scheme = pillar
+ common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
+ } else {
+ common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
+ elasticsearch_scheme = "http"
+ }
+
common.retry(retries,retries_wait) {
common.infoMsg('Waiting for Elasticsearch to become green..')
- salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+ salt.cmdRun(master, "I@elasticsearch:client", "curl -sfk ${elasticsearch_scheme}://${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
}
} catch (Exception er) {
errorOccured = true
@@ -204,8 +217,11 @@
common.infoMsg('Start the monitoring services')
salt.enforceState([saltId: pepperEnv, target: 'I@docker:swarm:role:master and I@prometheus:server', state: 'docker'])
salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+ common.infoMsg("Waiting grafana service to start")
+ sleep(120)
+
common.infoMsg('Refresh the Grafana dashboards')
- salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client'])
+ salt.enforceState([saltId: pepperEnv, target: 'I@grafana:client', state: 'grafana.client', retries: 10, retries_wait: 30])
} catch (Exception er) {
errorOccured = true
common.errorMsg("[ERROR] Upgrade of docker components failed. Please fix it manually.")
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index b7004f6..9c34f58 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -146,6 +146,10 @@
contextYaml['default_context']['secrets_encryption_enabled'] = 'False'
}
+ // disabling strong_usernames for tests to reduce diff between head and patched model
+ common.warningMsg('Disabling strong_usernames for tests!')
+ contextYaml['default_context']['strong_usernames'] = 'False'
+
def context = mcpCommon.dumpYAML(contextYaml)
if (!fileExists(new File(_templateEnvDir, 'tox.ini').toString())) {
common.warningMsg('Forming NEW reclass-root structure...')
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index 02e1789..8c08493 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -27,6 +27,9 @@
def dockerRegistry = env.DOCKER_REGISTRY ?: 'docker-prod-local.docker.mirantis.net'
def dockerReviewRegistry = env.DOCKER_REVIEW_REGISTRY ?: 'docker-dev-local.docker.mirantis.net'
def cvpImageName = env.CVP_DOCKER_IMG ? "${dockerRegistry}/${env.CVP_DOCKER_IMG}:${version}" : "${dockerRegistry}/mirantis/cvp/cvp-trymcp-tests:${version}"
+if (env.CVP_DEV_TAG && env.CVP_DOCKER_IMG) {
+ cvpImageName = "${dockerReviewRegistry}/${env.CVP_DOCKER_IMG}:${env.CVP_DEV_TAG}"
+}
def checkouted = false
def testReportHTMLFile = 'reports/report.html'
@@ -44,6 +47,15 @@
sh "mkdir -p reports ${apiProject} ${uiProject}"
def testImage = docker.image(cvpImageName)
def testImageOptions = "-u root:root --network=host -v ${env.WORKSPACE}/reports:/var/lib/qa_reports --entrypoint=''"
+ withCredentials([
+ [$class : 'UsernamePasswordMultiBinding',
+ credentialsId : 'scale-ci',
+ passwordVariable: 'JENKINS_PASSWORD',
+ usernameVariable: 'JENKINS_USER']
+ ]) {
+ env.JENKINS_USER = JENKINS_USER
+ env.JENKINS_PASSWORD = JENKINS_PASSWORD
+ }
try {
stage("checkout") {
if (event) {
@@ -137,7 +149,7 @@
dir(apiProject) {
python.runVirtualenvCommand("${env.WORKSPACE}/venv",
- "export IMAGE=${apiImage.id}; ./bootstrap_env.sh up")
+ "export IMAGE=${apiImage.id}; export DOCKER_COMPOSE=docker-compose-test.yml; ./bootstrap_env.sh up")
common.retry(5, 20) {
sh 'curl -v http://127.0.0.1:8001/api/v1 > /dev/null'
}
@@ -158,7 +170,7 @@
export TEST_PASSWORD=default
export TEST_MODELD_URL=127.0.0.1
export TEST_MODELD_PORT=3000
- export TEST_TIMEOUT=30
+ export TEST_TIMEOUT=15
cd /var/lib/trymcp-tests
pytest ${component}
"""
diff --git a/update-ceph.groovy b/update-ceph.groovy
index 59c616e..0302d5e 100644
--- a/update-ceph.groovy
+++ b/update-ceph.groovy
@@ -1,10 +1,9 @@
/**
- * Update packages on given nodes
+ * Update packages
*
* Expected parameters:
* SALT_MASTER_CREDENTIALS Credentials to the Salt API.
* SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
*/
pepperEnv = "pepperEnv"
@@ -19,23 +18,23 @@
def command
def commandKwargs
def selMinions = []
+def check_mon
def runCephCommand(master, target, cmd) {
return salt.cmdRun(master, target, cmd)
}
-def waitForHealthy(master, tgt, attempts=100, timeout=10) {
+def waitForHealthy(master, tgt, count = 0, attempts=100) {
// wait for healthy cluster
common = new com.mirantis.mk.Common()
- common.retry(attempts, timeout){
+ while (count<attempts) {
def health = runCephCommand(master, tgt, 'ceph health')['return'][0].values()[0]
if (health.contains('HEALTH_OK') || health.contains('HEALTH_WARN noout flag(s) set\n')) {
common.infoMsg('Cluster is healthy')
- return 0
- } else {
- common.infoMsg(health)
- throw new Exception()
+ break;
}
+ count++
+ sleep(10)
}
}
@@ -43,87 +42,62 @@
node() {
try {
+ def targets = ["common": "ceph-common", "osd": "ceph-osd", "mon": "ceph-mon",
+ "mgr":"ceph-mgr", "radosgw": "radosgw"]
+
stage('Setup virtualenv for Pepper') {
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- for (m in minions) {
- if (m.startsWith("osd") || m.startsWith("cmn") || m.startsWith("rgw")) {
- selMinions.add(m)
- }
- }
- }
-
-
-
stage('Apply package upgrades on all nodes') {
- for (tgt in selMinions) {
- try {
- if (tgt.startsWith("osd")) {
- out = runCephCommand(pepperEnv, tgt, "apt install --only-upgrade ceph-osd -y")
- salt.printSaltCommandResult(out)
- } else if (tgt.startsWith("cmn")) {
- out = runCephCommand(pepperEnv, tgt, "apt install --only-upgrade ceph-mon -y")
- salt.printSaltCommandResult(out)
- } else if (tgt.startsWith("rgw")) {
- out = runCephCommand(pepperEnv, tgt, "apt install --only-upgrade radosgw -y")
- salt.printSaltCommandResult(out)
- }
- } catch (Throwable e) {
- if (e.message.contains("Unmet dependencies")) {
- out = runCephCommand(pepperEnv, tgt, "apt -f install -y")
- salt.printSaltCommandResult(out)
- } else {
- throw (e)
- }
- }
+ targets.each { key, value ->
+ // try {
+ command = "pkg.install"
+ packages = value
+ commandKwargs = ['only_upgrade': 'true','force_yes': 'true']
+ target = "I@ceph:${key}"
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, true, packages, commandKwargs)
+ salt.printSaltCommandResult(out)
}
}
stage("Restart MONs and RGWs") {
+ selMinions = salt.getMinions(pepperEnv, "I@ceph:mon")
for (tgt in selMinions) {
- if (tgt.contains("cmn")) {
- runCephCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
- waitForHealthy(pepperEnv, tgt)
- } else if (tgt.contains("rgw")) {
- runCephCommand(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
- waitForHealthy(pepperEnv, tgt)
- }
+ // runSaltProcessStep 'service.restart' does not work for these services
+ runCephCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
+ waitForHealthy(pepperEnv, tgt)
+ }
+ selMinions = salt.getMinions(pepperEnv, "I@ceph:radosgw")
+ for (tgt in selMinions) {
+ runCephCommand(pepperEnv, tgt, "systemctl restart ceph-radosgw.target")
+ waitForHealthy(pepperEnv, tgt)
}
}
stage('Restart OSDs') {
+ selMinions = salt.getMinions(pepperEnv, "I@ceph:osd")
for (tgt in selMinions) {
- if (tgt.contains("osd")) {
- salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
- def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
+ salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
+ def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
- def osd_ids = []
- for (i in ceph_disks) {
- def osd_id = i.getKey().toString()
- osd_ids.add('osd.' + osd_id)
- }
-
- runCephCommand(pepperEnv, tgt, 'ceph osd set noout')
-
- for (i in osd_ids) {
-
- salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
- // wait for healthy cluster
- waitForHealthy(pepperEnv, tgt)
- }
-
- runCephCommand(pepperEnv, tgt, 'ceph osd unset noout')
+ def osd_ids = []
+ for (i in ceph_disks) {
+ def osd_id = i.getKey().toString()
+ osd_ids.add('osd.' + osd_id)
}
+
+ runCephCommand(pepperEnv, tgt, 'ceph osd set noout')
+
+ for (i in osd_ids) {
+ salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
+ // wait for healthy cluster
+ waitForHealthy(pepperEnv, tgt)
+ }
+
+ runCephCommand(pepperEnv, tgt, 'ceph osd unset noout')
}
}
diff --git a/update-glusterfs-clients.groovy b/update-glusterfs-clients.groovy
new file mode 100644
index 0000000..02e889a
--- /dev/null
+++ b/update-glusterfs-clients.groovy
@@ -0,0 +1,119 @@
+/**
+ * Update packages on given server nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_SERVER_STATUS Does not validate server availability/status before update
+ * IGNORE_SERVER_VERSION Does not validate that all servers have been updated
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+@NonCPS
+def getNextNode() {
+ for (n in hudson.model.Hudson.instance.slaves) {
+ node_name = n.getNodeName()
+ if (node_name != env.SLAVE_NAME) {
+ return node_name
+ }
+ }
+}
+
+def update() {
+ def pEnv = "pepperEnv"
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pkg_name = 'glusterfs-client'
+
+ /**
+ * - choose only those hosts where update is available. Exclude minion on which job is running
+ * - validate that all glusterfs servers are in normal working state. Can be skipped with option
+ * - validate that glusterfs on all servers has been updated, otherwise stop update. Can be skipped with option
+ * - run update state on one client at a time
+ */
+
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+ if (all_minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ minions = []
+ for (minion in all_minions) {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+ slave_container_id = salt.getReturnValues(salt.cmdRun(pEnv, minion, "which docker >/dev/null && docker ps --filter name=jenkins_${env.NODE_NAME} --filter status=running -q", false)).split('\n')[0]
+ if (latest_version != current_version) {
+ if (!slave_container_id.isEmpty() && !minion.startsWith('cfg')) {
+ env.SLAVE_NAME = env.NODE_NAME
+ env.SLAVE_MINION = minion
+ } else {
+ minions.add(minion)
+ }
+ } else {
+ common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+ }
+ }
+ }
+ if (!minions.isEmpty()) {
+ if (!IGNORE_SERVER_STATUS.toBoolean()){
+ stage('Validate servers availability') {
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+ common.successMsg("All glusterfs servers are available")
+ }
+ } else {
+ common.warningMsg("Check of glusterfs servers availability has been disabled")
+ }
+ if (!IGNORE_SERVER_VERSION.toBoolean()){
+ stage('Check that all glusterfs servers have been updated') {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minions[0], 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0].split('-')[0]
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "glusterfsd --version | head -n1 | awk '{print \$2}' | egrep '^${latest_version}' || echo none", latest_version, true, true, null, true, 1)
+ common.successMsg('All glusterfs servers have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of glusterfs servers' version has been disabled")
+ }
+ // Actual update
+ for (tgt in minions) {
+ stage("Update glusterfs on ${tgt}") {
+ salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.client'])
+ }
+ }
+ } else if (env.SLAVE_MINION == null) {
+ common.warningMsg("No hosts to update glusterfs on")
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+ throw e
+ }
+}
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ update()
+ }
+ // Perform an update from another slave to finish update on previous slave host
+ if (env.SLAVE_NAME != null && !env.SLAVE_NAME.isEmpty()) {
+ node(getNextNode()) {
+ update()
+ }
+ }
+}
diff --git a/update-glusterfs-cluster-op-version.groovy b/update-glusterfs-cluster-op-version.groovy
new file mode 100644
index 0000000..9623481
--- /dev/null
+++ b/update-glusterfs-cluster-op-version.groovy
@@ -0,0 +1,110 @@
+/**
+ * Update packages on given server nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_CLIENT_VERSION Does not validate that all clients have been updated
+ * IGNORE_SERVER_VERSION Does not validate that all servers have been updated
+ * CLUSTER_OP_VERSION GlusterFS cluster.op-version option to set. Default is to be set to current cluster.max-op-version if available.
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+// Expose every DRIVE_TRAIN_PARAMS entry as an environment variable.
+for (paramName in params.keySet()) {
+ env.setProperty(paramName, params[paramName])
+}
+
+/**
+ * - ensure that cluster.op-version can be updated
+ * - check that all servers have been updated to version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - check that all clients have been updated to version no less than CLUSTER_OP_VERSION or cluster.max-op-version
+ * - set cluster.op-version
+ */
+
+/**
+ * Convert glusterfs' cluster.op-version to regular version string
+ *
+ * @param version string representing cluster.op-version, i.e. 50400
+ * @return string version number, i.e. 5.4.0
+ */
+def convertVersion(version) {
+ // Declare locals explicitly: undeclared variables would leak into the
+ // global script binding under Jenkins CPS and may collide between runs.
+ def new_version = version[0]
+ for (int i = 1; i < version.length(); i++) {
+ if (i % 2 == 0) {
+ // Even index: always a significant digit, append as-is.
+ new_version += version[i]
+ } else if (version[i] == '0') {
+ // Odd-index zero is padding only: emit just the separator.
+ new_version += '.'
+ } else {
+ // Odd-index non-zero digit: separator plus the digit.
+ new_version += '.' + version[i]
+ }
+ }
+ return new_version
+}
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+ stage('Get current cluster.op-version') {
+ // Cluster-wide options can be read through any volume; use the first one.
+ volume = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume list")).split('\n')[0]
+ currentOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+ }
+ if (CLUSTER_OP_VERSION.isEmpty()) {
+ stage('Get cluster.max-op-version') {
+ // No explicit target given: default to the highest op-version the cluster supports.
+ CLUSTER_OP_VERSION = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get all cluster.max-op-version 2>/dev/null | grep cluster.max-op-version | awk '{print \$2}'")).split('\n')[0]
+ }
+ }
+ // op-version is expected in the 5-digit form, e.g. 50400.
+ if (CLUSTER_OP_VERSION.isEmpty() || CLUSTER_OP_VERSION.length() != 5) {
+ msg = 'No cluster.op-version specified to set'
+ common.errorMsg(msg)
+ currentBuild.result = "FAILURE"
+ currentBuild.description = msg
+ } else if (currentOpVersion == CLUSTER_OP_VERSION) {
+ common.warningMsg("cluster.op-version is already set to ${currentOpVersion}")
+ } else {
+ version = convertVersion(CLUSTER_OP_VERSION)
+ if (!IGNORE_SERVER_VERSION.toBoolean()){
+ stage('Check that all servers have been updated') {
+ // 'ge', not 'gt': a node running exactly the desired version is up
+ // to date — the contract is "no less than" CLUSTER_OP_VERSION.
+ salt.commandStatus(pEnv, 'I@glusterfs:server', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') ge ${version} && echo good", 'good', true, true, null, true, 1)
+ common.successMsg('All servers have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of servers' version has been disabled")
+ }
+ if (!IGNORE_CLIENT_VERSION.toBoolean()){
+ stage('Check that all clients have been updated') {
+ // Same 'ge' reasoning as for servers above.
+ salt.commandStatus(pEnv, 'I@glusterfs:client', "dpkg --compare-versions \$(glusterfsd --version | head -n1| awk '{print \$2}') ge ${version} && echo good", 'good', true, true, null, true, 1)
+ common.successMsg('All clients have been updated to desired version')
+ }
+ } else {
+ common.warningMsg("Check of clients' version has been disabled")
+ }
+ stage("Update cluster.op-version") {
+ salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume set all cluster.op-version ${CLUSTER_OP_VERSION}")
+ }
+ stage("Validate cluster.op-version") {
+ // Re-read the option to confirm the cluster actually accepted the value.
+ newOpVersion = salt.getReturnValues(salt.cmdRun(pEnv, 'I@glusterfs:server:role:primary', "gluster volume get ${volume} cluster.op-version | grep cluster.op-version | awk '{print \$2}'")).split('\n')[0]
+ if (newOpVersion != CLUSTER_OP_VERSION) {
+ throw new Exception("cluster.op-version was not set to ${CLUSTER_OP_VERSION}")
+ }
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
+}
diff --git a/update-glusterfs-servers.groovy b/update-glusterfs-servers.groovy
new file mode 100644
index 0000000..23b280d
--- /dev/null
+++ b/update-glusterfs-servers.groovy
@@ -0,0 +1,92 @@
+/**
+ * Update packages on given server nodes
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ * IGNORE_SERVER_STATUS Does not validate server availability/status before update
+ * IGNORE_NON_REPLICATED_VOLUMES Update GlusterFS even if there are non-replicated volume(s)
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian]
+ */
+
+def pEnv = "pepperEnv"
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+def pkg_name = 'glusterfs-server'
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+// Expose every DRIVE_TRAIN_PARAMS entry as an environment variable.
+for (paramName in params.keySet()) {
+ env.setProperty(paramName, params[paramName])
+}
+
+/**
+ * - choose only those hosts where update is available
+ * - validate that all servers are in normal working state. Can be skipped with option
+ * - validate all volumes are replicated. If there is a non-replicated volume stop update. Can be skipped with option
+ * - run update state on one server at a time
+ */
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ all_minions = salt.getMinions(pEnv, TARGET_SERVERS)
+
+ if (all_minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+ // Keep only minions that actually have a newer glusterfs-server available.
+ minions = []
+ for (minion in all_minions) {
+ latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
+ current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
+ if (latest_version != current_version) {
+ minions.add(minion)
+ } else {
+ common.infoMsg("${pkg_name} has been already upgraded or newer version is not available on ${minion}. Skip upgrade")
+ }
+ }
+ }
+ if (!minions.isEmpty()) {
+ if (!IGNORE_SERVER_STATUS.toBoolean()){
+ stage('Validate servers availability') {
+ // Every peer must report 'Connected' before rolling updates through the pool.
+ salt.commandStatus(pEnv, TARGET_SERVERS, "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
+ common.successMsg("All servers are available")
+ }
+ } else {
+ common.warningMsg("Check of servers availability has been disabled")
+ }
+ if (!IGNORE_NON_REPLICATED_VOLUMES.toBoolean()){
+ stage('Check that all volumes are replicated') {
+ // A non-replicated volume goes offline while its only brick restarts.
+ salt.commandStatus(pEnv, TARGET_SERVERS, "gluster volume info | fgrep 'Type:' | fgrep -v Replicate", null, false, true, null, true, 1)
+ common.successMsg("All volumes are replicated")
+ }
+ } else {
+ common.warningMsg("Check of volumes' replication has been disabled. Be aware, you may lose data during update!")
+ }
+ // Actual update: one server at a time so replicas keep serving data.
+ for (tgt in minions) {
+ stage("Update glusterfs on ${tgt}") {
+ salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.server'])
+ }
+ }
+ } else {
+ common.warningMsg("No hosts to update glusterfs on")
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ // Best effort: re-apply the glusterfs state to leave servers consistent.
+ salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
+ throw e
+ }
+ }
+}
diff --git a/update-glusterfs.groovy b/update-glusterfs.groovy
new file mode 100644
index 0000000..67d3341
--- /dev/null
+++ b/update-glusterfs.groovy
@@ -0,0 +1,81 @@
+/**
+ * Complete update glusterfs pipeline
+ *
+ * Expected parameters:
+ * DRIVE_TRAIN_PARAMS Yaml, DriveTrain related params:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000]
+ */
+
+// Convert parameters from yaml to env variables
+params = readYaml text: env.DRIVE_TRAIN_PARAMS
+for (key in params.keySet()) {
+ value = params[key]
+ env.setProperty(key, value)
+}
+
+// Block until Gerrit answers HTTP 200 on its master URL, or fail after
+// wait_timeout minutes. The URL is taken from _param:gerrit_master_url when
+// set, otherwise assembled from the gerrit:client pillar.
+def waitGerrit(salt_target, wait_timeout) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def python = new com.mirantis.mk.Python()
+ def pEnv = "pepperEnv"
+ python.setupPepperVirtualenv(pEnv, env.SALT_MASTER_URL, env.SALT_MASTER_CREDENTIALS)
+
+ salt.fullRefresh(pEnv, salt_target)
+
+ // Preferred source of the Gerrit URL: the explicit _param override.
+ def gerrit_master_url = salt.getPillar(pEnv, salt_target, '_param:gerrit_master_url')
+
+ if(!gerrit_master_url['return'].isEmpty()) {
+ gerrit_master_url = gerrit_master_url['return'][0].values()[0]
+ } else {
+ gerrit_master_url = ''
+ }
+
+ if (gerrit_master_url != '') {
+ common.infoMsg('Gerrit master url "' + gerrit_master_url + '" retrieved at _param:gerrit_master_url')
+ } else {
+ common.infoMsg('Gerrit master url could not be retrieved at _param:gerrit_master_url. Falling back to gerrit pillar')
+
+ def gerrit_host
+ def gerrit_http_port
+ def gerrit_http_scheme
+ def gerrit_http_prefix
+
+ // Fallback: assemble the URL from the individual gerrit:client pillar keys.
+ def host_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:host')
+ gerrit_host = salt.getReturnValues(host_pillar)
+
+ def port_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:http_port')
+ gerrit_http_port = salt.getReturnValues(port_pillar)
+
+ def scheme_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:protocol')
+ gerrit_http_scheme = salt.getReturnValues(scheme_pillar)
+
+ def prefix_pillar = salt.getPillar(pEnv, salt_target, 'gerrit:client:server:url_prefix')
+ gerrit_http_prefix = salt.getReturnValues(prefix_pillar)
+
+ gerrit_master_url = gerrit_http_scheme + '://' + gerrit_host + ':' + gerrit_http_port + gerrit_http_prefix
+
+ }
+
+ // wait_timeout is in minutes (Jenkins timeout() default unit); the remote
+ // curl-poll loop gets a slightly larger hard cap expressed in seconds.
+ timeout(wait_timeout) {
+ common.infoMsg('Waiting for Gerrit to come up..')
+ def check_gerrit_cmd = 'while true; do curl -sI -m 3 -o /dev/null -w' + " '" + '%{http_code}' + "' " + gerrit_master_url + '/ | grep 200 && break || sleep 1; done'
+ salt.cmdRun(pEnv, salt_target, 'timeout ' + (wait_timeout*60+3) + ' /bin/sh -c -- ' + '"' + check_gerrit_cmd + '"')
+ }
+}
+
+// Orchestration: servers first, then clients, then bump cluster.op-version.
+node() {
+ stage('Update glusterfs servers') {
+ build(job: 'update-glusterfs-servers')
+ }
+ // Give the cluster time to settle before touching the clients.
+ sleep 180
+ stage('Update glusterfs clients') {
+ build(job: 'update-glusterfs-clients')
+ }
+}
+node() {
+ // Wait (up to 300 minutes) for Gerrit to come back before raising
+ // cluster.op-version.
+ waitGerrit('I@gerrit:client', 300)
+ stage('Update glusterfs cluster.op-version') {
+ build(job: 'update-glusterfs-cluster-op-version')
+ }
+}
diff --git a/update-package.groovy b/update-package.groovy
index df7655b..14c2056 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -6,6 +6,7 @@
* SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
* TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
* TARGET_PACKAGES Space delimited list of packages to be updates [package1=version package2=version], empty string means all updating all packages to the latest version.
+ * BATCH_SIZE Use batching for large amount of target nodes
*
**/
@@ -13,14 +14,19 @@
salt = new com.mirantis.mk.Salt()
common = new com.mirantis.mk.Common()
-def installSaltStack(target, pkgs, masterUpdate = false){
+def batch_size = ''
+if (common.validInputParam('BATCH_SIZE')) {
+ batch_size = "${BATCH_SIZE}"
+}
+
+// Kick off an async SaltStack package install on 'target' and wait until the
+// affected minions respond again (optionally in batches of 'batch').
+def installSaltStack(target, pkgs, batch, masterUpdate = false){
 salt.cmdRun(pepperEnv, "I@salt:master", "salt -C '${target}' --async pkg.install force_yes=True pkgs='$pkgs'")
 def minions_reachable = target
 if (masterUpdate) {
 // in case of update Salt Master packages - check all minions are good
 minions_reachable = '*'
 }
- salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable])
+ salt.checkTargetMinionsReady(['saltId': pepperEnv, 'target': target, 'target_reachable': minions_reachable, 'batch': batch])
 }
timeout(time: 12, unit: 'HOURS') {
@@ -46,12 +52,16 @@
stage("List package upgrades") {
common.infoMsg("Listing all the packages that have a new update available on nodes: ${targetLiveAll}")
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.list_upgrades', [], null, true)
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.list_upgrades', [], batch_size, true)
if (TARGET_PACKAGES != '' && TARGET_PACKAGES != '*') {
- common.warningMsg("Note that only the \"${TARGET_PACKAGES}\" would be installed from the above list of available updates on the ${targetLiveAll}")
+ if (ALLOW_DEPENDENCY_UPDATE.toBoolean()) {
+ common.warningMsg("Note that the \"${TARGET_PACKAGES}\" and it new dependencies would be installed from the above list of available updates on the ${targetLiveAll}")
+ } else {
+ common.warningMsg("Note that only the \"${TARGET_PACKAGES}\" would be installed from the above list of available updates on the ${targetLiveAll}")
+ commandKwargs = ['only_upgrade': 'true']
+ }
command = "pkg.install"
packages = TARGET_PACKAGES.tokenize(' ')
- commandKwargs = ['only_upgrade': 'true']
}
}
@@ -68,9 +78,9 @@
for (int i = 0; i < saltTargets.size(); i++ ) {
common.retry(10, 5) {
if (salt.getMinions(pepperEnv, "I@salt:master and ${saltTargets[i]}")) {
- installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', true)
+ installSaltStack("I@salt:master and ${saltTargets[i]}", '["salt-master", "salt-common", "salt-api", "salt-minion"]', null, true)
} else if (salt.getMinions(pepperEnv, "I@salt:minion and not I@salt:master and ${saltTargets[i]}")) {
- installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]')
+ installSaltStack("I@salt:minion and not I@salt:master and ${saltTargets[i]}", '["salt-minion"]', batch_size)
} else {
error("Minion ${saltTargets[i]} is not reachable!")
}
@@ -78,7 +88,7 @@
}
}
common.infoMsg('Starting package upgrades...')
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+ out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, batch_size, packages, commandKwargs)
salt.printSaltCommandResult(out)
for(value in out.get("return")[0].values()){
if (value.containsKey('result') && value.result == false) {
diff --git a/update-salt-environment.groovy b/update-salt-environment.groovy
index b91f385..2ae408f 100644
--- a/update-salt-environment.groovy
+++ b/update-salt-environment.groovy
@@ -27,7 +27,7 @@
'apt-get update && apt-get install -y salt-formula-*'
)
common.infoMsg("Running salt sync-all")
- salt.runSaltProcessStep(venvPepper, 'jma*', 'saltutil.sync_all', [], null, true)
+ salt.runSaltProcessStep(venvPepper, '*', 'saltutil.sync_all', [], null, true)
}
}
stage("Update Reclass") {
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index d1614eb..1d28498 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -8,6 +8,7 @@
* DRIVE_TRAIN_PARAMS Yaml, DriveTrain releated params:
* SALT_MASTER_URL Salt API server location
* SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * BATCH_SIZE Use batch sizing during upgrade for large envs
* UPGRADE_SALTSTACK Upgrade SaltStack packages to new version.
* UPDATE_CLUSTER_MODEL Update MCP version parameter in cluster model
* UPDATE_PIPELINES Update pipeline repositories on Gerrit
@@ -22,14 +23,25 @@
venvPepper = "venvPepper"
workspace = ""
-def triggerMirrorJob(jobName) {
+// Trigger a git-mirror job with its stored parameters; on failure retry with
+// BRANCHES pinned to the requested reclass system branch (failsafe mode).
+def triggerMirrorJob(String jobName, String reclassSystemBranch) {
 params = jenkinsUtils.getJobParameters(jobName)
- build job: jobName, parameters: [
- [$class: 'StringParameterValue', name: 'BRANCHES', value: params.get("BRANCHES")],
- [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: params.get("CREDENTIALS_ID")],
- [$class: 'StringParameterValue', name: 'SOURCE_URL', value: params.get("SOURCE_URL")],
- [$class: 'StringParameterValue', name: 'TARGET_URL', value: params.get("TARGET_URL")]
- ]
+ try {
+ build job: jobName, parameters: [
+ [$class: 'StringParameterValue', name: 'BRANCHES', value: params.get('BRANCHES')],
+ [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: params.get('CREDENTIALS_ID')],
+ [$class: 'StringParameterValue', name: 'SOURCE_URL', value: params.get('SOURCE_URL')],
+ [$class: 'StringParameterValue', name: 'TARGET_URL', value: params.get('TARGET_URL')]
+ ]
+ } catch (Exception updateErr) {
+ common.warningMsg(updateErr)
+ common.warningMsg('Attempt to update git repo in failsafe manner')
+ // The job's stored BRANCHES value may be stale — use the reclass system
+ // branch instead, stripping the 'origin/' remote prefix.
+ build job: jobName, parameters: [
+ [$class: 'StringParameterValue', name: 'BRANCHES', value: reclassSystemBranch.replace('origin/', '')],
+ [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: params.get('CREDENTIALS_ID')],
+ [$class: 'StringParameterValue', name: 'SOURCE_URL', value: params.get('SOURCE_URL')],
+ [$class: 'StringParameterValue', name: 'TARGET_URL', value: params.get('TARGET_URL')]
+ ]
+ }
 }
def updateSaltStack(target, pkgs) {
@@ -58,64 +70,231 @@
}
}
+// Return the salt master's worker_threads setting as a string. An explicit
+// SALT_MASTER_OPT_WORKER_THREADS env var takes precedence over the config file.
+def getWorkerThreads(saltId) {
+ if (env.getEnvironment().containsKey('SALT_MASTER_OPT_WORKER_THREADS')) {
+ return env['SALT_MASTER_OPT_WORKER_THREADS'].toString()
+ }
+ // Otherwise read the configured value straight from the master's config file.
+ def threads = salt.cmdRun(saltId, "I@salt:master", "cat /etc/salt/master.d/master.conf | grep worker_threads | cut -f 2 -d ':'", true, null, true)
+ return threads['return'][0].values()[0].replaceAll('Salt command execution success','').trim()
+}
+
+def wa29352(ArrayList saltMinions, String cname) {
+ // WA for PROD-29352. Issue caused by patch https://gerrit.mcp.mirantis.com/#/c/37932/12/openssh/client/root.yml
+ // The default soft-param was removed, which makes some old envs impossible to render.
+ // As a fix, copy the already-generated root key into a secrets class with the correct key name.
+ def wa29352ClassName = 'cluster.' + cname + '.infra.secrets_root_wa29352'
+ def wa29352File = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets_root_wa29352.yml"
+ def wa29352SecretsFile = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets.yml"
+ def _tempFile = '/tmp/wa29352_' + UUID.randomUUID().toString().take(8)
+ // If the key is already in secrets.yml, or the WA class file exists, the
+ // work-around was applied before — bail out early.
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep -qiv root_private_key ${wa29352SecretsFile}", true, null, false)
+ salt.cmdRun(venvPepper, 'I@salt:master', "test ! -f ${wa29352File}", true, null, false)
+ }
+ catch (Exception ex) {
+ common.infoMsg('Work-around for PROD-29352 already applied, nothing todo')
+ return
+ }
+ def rKeysDict = [
+ 'parameters': [
+ '_param': [
+ 'root_private_key': salt.getPillar(venvPepper, 'I@salt:master', '_param:root_private_key').get('return')[0].values()[0].trim(),
+ 'root_public_key' : '',
+ ]
+ ]
+ ]
+ // save root key,and generate public one from it
+ writeFile file: _tempFile, text: rKeysDict['parameters']['_param']['root_private_key'].toString().trim()
+ sh('chmod 0600 ' + _tempFile)
+ rKeysDict['parameters']['_param']['root_public_key'] = sh(script: "ssh-keygen -q -y -f ${_tempFile}", returnStdout: true).trim()
+ sh('rm -fv ' + _tempFile)
+ // Ship the generated YAML to the master via base64 to survive shell quoting.
+ writeYaml file: _tempFile, data: rKeysDict
+ def yamlData = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "echo '${yamlData}' | base64 -d > ${wa29352File}", false, null, false)
+ common.infoMsg("Add $wa29352ClassName class into secrets.yml")
+
+ // Add 'classes:' directive
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q 'classes:' infra/secrets.yml || sed -i '1iclasses:' infra/secrets.yml")
+
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q '${wa29352ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29352ClassName' infra/secrets.yml")
+ salt.fullRefresh(venvPepper, '*')
+ sh('rm -fv ' + _tempFile)
+ // Commit the new class so the cluster model stays under version control.
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && git status && " +
+ "git add ${wa29352File} && git add -u && git commit --allow-empty -m 'Cluster model updated with WA for PROD-29352. Issue cause due patch https://gerrit.mcp.mirantis.com/#/c/37932/ at ${common.getDatetime()}' ")
+ common.infoMsg('Work-around for PROD-29352 successfully applied')
+}
+
+def wa29155(ArrayList saltMinions, String cname) {
+ // WA for PROD-29155. Issue caused by patch https://gerrit.mcp.mirantis.com/#/c/37932/
+ // Check that cmp nodes render correctly. If rendering fails, apply the ssh-key WA.
+ def ret = '' // NOTE(review): unused — candidate for removal
+ def patched = false
+ def wa29155ClassName = 'cluster.' + cname + '.infra.secrets_nova_wa29155'
+ def wa29155File = "/srv/salt/reclass/classes/cluster/${cname}/infra/secrets_nova_wa29155.yml"
+
+ // If the WA class file already exists, the work-around was applied earlier.
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "test ! -f ${wa29155File}", true, null, false)
+ }
+ catch (Exception ex) {
+ common.infoMsg('Work-around for PROD-29155 already apply, nothing todo')
+ return
+ }
+ salt.fullRefresh(venvPepper, 'I@salt:master')
+ salt.fullRefresh(venvPepper, 'I@nova:compute')
+ for (String minion in saltMinions) {
+ // First attempt, second will be performed in next validateReclassModel() stages
+ try {
+ salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ } catch (Exception e) {
+ common.errorMsg(e.toString())
+ if (patched) {
+ error("Node: ${minion} failed to render after reclass-system upgrade!WA29155 probably didn't help.")
+ }
+ // check, that failed exactly by our case, by key-length check.
+ def missed_key = salt.getPillar(venvPepper, minion, '_param:nova_compute_ssh_private').get("return")[0].values()[0]
+ if (missed_key != '') {
+ error("Node: ${minion} failed to render after reclass-system upgrade!")
+ }
+ common.warningMsg('Perform: Attempt to apply WA for PROD-29155\n' +
+ 'See https://gerrit.mcp.mirantis.com/#/c/37932/ for more info')
+ common.warningMsg('WA-PROD-29155 Generating new ssh key at master node')
+ def _tempFile = "/tmp/nova_wa29155_" + UUID.randomUUID().toString().take(8)
+ common.infoMsg('Perform: generation NEW ssh-private key for nova-compute')
+ salt.cmdRun(venvPepper, 'I@salt:master', "ssh-keygen -f ${_tempFile} -N '' -q")
+ def _pub_k = salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', "cat ${_tempFile}.pub").get('return')[0].values()[0].trim()
+ def _priv_k = salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', "cat ${_tempFile}").get('return')[0].values()[0].trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "rm -fv ${_tempFile}", false, null, false)
+ def novaKeysDict = [
+ "parameters": [
+ "_param": [
+ "nova_compute_ssh_private": _priv_k,
+ "nova_compute_ssh_public" : _pub_k
+ ]
+ ]
+ ]
+ // Ship the generated YAML to the master via base64 to survive shell quoting.
+ writeYaml file: _tempFile, data: novaKeysDict
+ def yamlData = sh(script: "cat ${_tempFile} | base64", returnStdout: true).trim()
+ salt.cmdRun(venvPepper, 'I@salt:master', "echo '${yamlData}' | base64 -d > ${wa29155File}", false, null, false)
+ common.infoMsg("Add $wa29155ClassName class into secrets.yml")
+
+ // Add 'classes:' directive
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q 'classes:' infra/secrets.yml || sed -i '1iclasses:' infra/secrets.yml")
+
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && " +
+ "grep -q '${wa29155ClassName}' infra/secrets.yml || sed -i '/classes:/ a - $wa29155ClassName' infra/secrets.yml")
+ salt.fullRefresh(venvPepper, 'cfg*')
+ salt.fullRefresh(venvPepper, 'cmp*')
+ patched = true
+ }
+ }
+ if (patched) {
+ // Commit the new class so the cluster model stays under version control.
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cname && git status && " +
+ "git add ${wa29155File} && git add -u && git commit --allow-empty -m 'Cluster model updated with WA for PROD-29155. Issue cause due patch https://gerrit.mcp.mirantis.com/#/c/37932/ at ${common.getDatetime()}' ")
+ common.infoMsg('Work-around for PROD-29155 successfully applied')
+ }
+
+}
+
+def wa32284(String clusterName) {
+ // WA for PROD-32284: when the salt master serves PKI from a glusterfs
+ // volume, nginx must wait for the srv-salt-pki mount before starting.
+ def clientGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:enabled").get("return")[0].values()[0]
+ def pkiGluster = salt.getPillar(venvPepper, 'I@salt:master', "glusterfs:client:volumes:salt_pki").get("return")[0].values()[0]
+ def nginxEnabledAtMaster = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:enabled').get('return')[0].values()[0]
+ if (nginxEnabledAtMaster.toString().toLowerCase() == 'true' && clientGluster.toString().toLowerCase() == 'true' && pkiGluster) {
+ def nginxRequires = salt.getPillar(venvPepper, 'I@salt:master', 'nginx:server:wait_for_service').get('return')[0].values()[0]
+ // Only apply when no wait_for_service is configured yet.
+ if (nginxRequires.isEmpty()) {
+ def nginxRequiresClassName = "cluster.${clusterName}.infra.config.nginx_requires_wa32284"
+ def nginxRequiresClassFile = "/srv/salt/reclass/classes/cluster/${clusterName}/infra/config/nginx_requires_wa32284.yml"
+ def nginxRequiresBlock = ['parameters': ['nginx': ['server': ['wait_for_service': ['srv-salt-pki.mount'] ] ] ] ]
+ def _tempFile = '/tmp/wa32284_' + UUID.randomUUID().toString().take(8)
+ writeYaml file: _tempFile , data: nginxRequiresBlock
+ def nginxRequiresBlockString = sh(script: "cat ${_tempFile}", returnStdout: true).trim()
+ // Remove the temp file once read (consistent with the other WA helpers,
+ // which clean up their /tmp artifacts).
+ sh(script: "rm -f ${_tempFile}", returnStdout: false)
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/${clusterName} && " +
+ "sed -i '/^parameters:/i - ${nginxRequiresClassName}' infra/config/init.yml")
+ salt.cmdRun(venvPepper, 'I@salt:master', "echo '${nginxRequiresBlockString}' > ${nginxRequiresClassFile}", false, null, false)
+ }
+ }
+}
+
+def wa32182(String cluster_name) {
+ // WA for PROD-32182: make sure every OpenContrail-related class file pulls in
+ // system.opencontrail.common (directly or via the generated *_wa32182 class).
+ if (salt.testTarget(venvPepper, 'I@opencontrail:control or I@opencontrail:collector')) {
+ def clusterModelPath = "/srv/salt/reclass/classes/cluster/${cluster_name}"
+ def fixFile = "${clusterModelPath}/opencontrail/common_wa32182.yml"
+ def usualFile = "${clusterModelPath}/opencontrail/common.yml"
+ def fixFileContent = "classes:\n- system.opencontrail.common\n"
+ // Create the WA class only when neither the fix nor the usual class exists.
+ salt.cmdRun(venvPepper, 'I@salt:master', "test -f ${fixFile} -o -f ${usualFile} || echo '${fixFileContent}' > ${fixFile}")
+ def contrailFiles = ['opencontrail/analytics.yml', 'opencontrail/control.yml', 'openstack/compute/init.yml']
+ if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
+ contrailFiles.add('kubernetes/compute.yml')
+ }
+ for(String contrailFile in contrailFiles) {
+ contrailFile = "${clusterModelPath}/${contrailFile}"
+ // Skip files that already include the common class (with or without the WA suffix).
+ def containsFix = salt.cmdRun(venvPepper, 'I@salt:master', "grep -E '^- cluster\\.${cluster_name}\\.opencontrail\\.common(_wa32182)?\$' ${contrailFile}", false, null, true).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ if (containsFix) {
+ continue
+ } else {
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep -q -E '^parameters:' ${contrailFile} && sed -i '/^parameters:/i - cluster.${cluster_name}.opencontrail.common_wa32182' ${contrailFile} || " +
+ "echo '- cluster.${cluster_name}.opencontrail.common_wa32182' >> ${contrailFile}")
+ }
+ }
+ }
+}
+
def archiveReclassInventory(filename) {
- def ret = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -i", true, null, false)
- def reclassInv = ret.values()[0]
- writeFile file: filename, text: reclassInv.toString()
- archiveArtifacts artifacts: "$filename"
+ def _tmp_file = '/tmp/' + filename + UUID.randomUUID().toString().take(8)
+ // Jenkins may run out of heap on large inventories; gzip+base64 the data as a workaround.
+ def ret = salt.cmdRun(venvPepper, 'I@salt:master', 'reclass -i 2>/dev/null | gzip -9 -c | base64', true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ def _tmp = sh(script: "echo '$ret' > ${_tmp_file}", returnStdout: false)
+ // Decode and decompress locally, then archive the plain-text inventory.
+ sh(script: "cat ${_tmp_file} | base64 -d | gzip -d > $filename", returnStdout: false)
+ archiveArtifacts artifacts: filename
+ sh(script: "rm -v ${_tmp_file}|| true")
 }
def validateReclassModel(ArrayList saltMinions, String suffix) {
+ // Render each minion's reclass inventory and save it for later diffing; any
+ // render failure aborts the pipeline so a broken model is caught early.
 try {
- dir(suffix) {
- for(String minion in saltMinions) {
- common.infoMsg("Reclass model validation for minion ${minion}...")
- def ret = salt.cmdRun("${workspace}/${venvPepper}", 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0]
- writeFile file: minion, text: ret.toString()
- }
+ for (String minion in saltMinions) {
+ common.infoMsg("Reclass model validation for minion ${minion}...")
+ def reclassInv = salt.cmdRun(venvPepper, 'I@salt:master', "reclass -n ${minion}", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ writeFile file: "inventory-${minion}-${suffix}.out", text: reclassInv.toString()
 }
 } catch (Exception e) {
 common.errorMsg('Can not validate current Reclass model. Inspect failed minion manually.')
- error(e)
+ error(e.toString())
 }
 }
-def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix, String newSuffix) {
- def diffDir = 'pillarsDiff'
- dir(diffDir) {
- for(String minion in saltMinions) {
- def fileName = "reclass-model-${minion}-diff.out"
- sh "diff -u ${workspace}/${oldSuffix}/${minion} ${workspace}/${newSuffix}/${minion} > ${fileName} || true"
- }
+// Diff each minion's before/after inventory dump and archive the results.
+def archiveReclassModelChanges(ArrayList saltMinions, String oldSuffix = 'before', String newSuffix = 'after') {
+ for (String minion in saltMinions) {
+ def fileName = "reclass-model-${minion}-diff.out"
+ // '|| true': diff exits 1 when files differ — expected here, not an error.
+ sh "diff -u inventory-${minion}-${oldSuffix}.out inventory-${minion}-${newSuffix}.out > ${fileName} || true"
+ archiveArtifacts artifacts: "${fileName}"
 }
- archiveArtifacts artifacts: "${oldSuffix}/*"
- archiveArtifacts artifacts: "${newSuffix}/*"
- archiveArtifacts artifacts: "${diffDir}/*"
 }
if (common.validInputParam('PIPELINE_TIMEOUT')) {
try {
pipelineTimeout = env.PIPELINE_TIMEOUT.toInteger()
- } catch(Exception e) {
+ } catch (Exception e) {
common.warningMsg("Provided PIPELINE_TIMEOUT parameter has invalid value: ${env.PIPELINE_TIMEOUT} - should be interger")
}
}
timeout(time: pipelineTimeout, unit: 'HOURS') {
- node("python && docker") {
+ node("python") {
try {
+ def inventoryBeforeFilename = "reclass-inventory-before.out"
+ def inventoryAfterFilename = "reclass-inventory-after.out"
workspace = common.getWorkspace()
- deleteDir()
targetMcpVersion = null
if (!common.validInputParam('TARGET_MCP_VERSION') && !common.validInputParam('MCP_VERSION')) {
error('You must specify MCP version in TARGET_MCP_VERSION|MCP_VERSION variable')
}
// bw comp. for 2018.X => 2018.11 release
- if (common.validInputParam('MCP_VERSION')){
+ if (common.validInputParam('MCP_VERSION')) {
targetMcpVersion = env.MCP_VERSION
common.warningMsg("targetMcpVersion has been changed to:${targetMcpVersion}, which was taken from deprecated pipeline viriable:MCP_VERSION")
- }
- else {
+ } else {
targetMcpVersion = env.TARGET_MCP_VERSION
}
// end bw comp. for 2018.X => 2018.11 release
@@ -137,19 +316,21 @@
def updateLocalRepos = ''
def reclassSystemBranch = ''
def reclassSystemBranchDefault = gitTargetMcpVersion
+ def batchSize = ''
if (gitTargetMcpVersion != 'proposed') {
reclassSystemBranchDefault = "origin/${gitTargetMcpVersion}"
}
- def driteTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
- if (driteTrainParamsYaml) {
- def driteTrainParams = readYaml text: driteTrainParamsYaml
- saltMastURL = driteTrainParams.get('SALT_MASTER_URL')
- saltMastCreds = driteTrainParams.get('SALT_MASTER_CREDENTIALS')
- upgradeSaltStack = driteTrainParams.get('UPGRADE_SALTSTACK', false).toBoolean()
- updateClusterModel = driteTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
- updatePipelines = driteTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
- updateLocalRepos = driteTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
- reclassSystemBranch = driteTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
+ def driveTrainParamsYaml = env.getProperty('DRIVE_TRAIN_PARAMS')
+ if (driveTrainParamsYaml) {
+ def driveTrainParams = readYaml text: driveTrainParamsYaml
+ saltMastURL = driveTrainParams.get('SALT_MASTER_URL')
+ saltMastCreds = driveTrainParams.get('SALT_MASTER_CREDENTIALS')
+ upgradeSaltStack = driveTrainParams.get('UPGRADE_SALTSTACK', false).toBoolean()
+ updateClusterModel = driveTrainParams.get('UPDATE_CLUSTER_MODEL', false).toBoolean()
+ updatePipelines = driveTrainParams.get('UPDATE_PIPELINES', false).toBoolean()
+ updateLocalRepos = driveTrainParams.get('UPDATE_LOCAL_REPOS', false).toBoolean()
+ reclassSystemBranch = driveTrainParams.get('RECLASS_SYSTEM_BRANCH', reclassSystemBranchDefault)
+ batchSize = driveTrainParams.get('BATCH_SIZE', '')
} else {
// backward compatibility for 2018.11.0
saltMastURL = env.getProperty('SALT_MASTER_URL')
@@ -160,26 +341,30 @@
updateLocalRepos = env.getProperty('UPDATE_LOCAL_REPOS').toBoolean()
reclassSystemBranch = reclassSystemBranchDefault
}
-
python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
-
- def pillarsBeforeSuffix = 'pillarsBefore'
- def pillarsAfterSuffix = 'pillarsAfter'
- def inventoryBeforeFilename = "reclass-inventory-before.out"
- def inventoryAfterFilename = "reclass-inventory-after.out"
-
def minions = salt.getMinions(venvPepper, '*')
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
+ if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
+ error('Pillar data is broken for Salt master node! Please check it manually and re-run pipeline.')
+ }
+ if (!batchSize) {
+ batchSize = getWorkerThreads(venvPepper)
+ }
- stage("Update Reclass and Salt-Formulas ") {
- validateReclassModel(minions, pillarsBeforeSuffix)
+ stage('Update Reclass and Salt-Formulas') {
+ common.infoMsg('Perform: Full salt sync')
+ salt.fullRefresh(venvPepper, '*')
+ common.infoMsg('Perform: Validate reclass metadata before processing')
+ validateReclassModel(minions, 'before')
+
+ common.infoMsg('Perform: archiveReclassInventory before upgrade')
archiveReclassInventory(inventoryBeforeFilename)
try {
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
+ salt.cmdRun(venvPepper, 'I@salt:master', 'cd /srv/salt/reclass/ && git status && git diff-index --quiet HEAD --')
}
catch (Exception ex) {
- error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
+ error('You have uncommitted changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.')
}
if (updateClusterModel) {
common.infoMsg('Perform: UPDATE_CLUSTER_MODEL')
@@ -202,7 +387,33 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
"grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
- "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+ "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name/infra && sed -i '/linux_system_repo_mcp_maas_url/d' maas.yml")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name/infra && sed -i '/maas_region_main_archive/d' maas.yml")
+
+ // Switch Jenkins/Gerrit to use LDAP SSL/TLS
+ def gerritldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly 'gerrit_ldap_server: .*' * | grep -Po 'gerrit_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ if (gerritldapURI.startsWith('ldap://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+ } else if (! gerritldapURI.startsWith('ldaps://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|gerrit_ldap_server: .*|gerrit_ldap_server: \"ldaps://${gerritldapURI}\"|g'")
+ }
+ def jenkinsldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly 'jenkins_security_ldap_server: .*' * | grep -Po 'jenkins_security_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+ if (jenkinsldapURI.startsWith('ldap://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+ } else if (! jenkinsldapURI.startsWith('ldaps://')) {
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_security_ldap_server: .*|jenkins_security_ldap_server: \"ldaps://${jenkinsldapURI}\"|g'")
+ }
+
+ wa32284(cluster_name)
+
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
// Add kubernetes-extra repo
if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
@@ -217,7 +428,7 @@
}
// Add all update repositories
def repoIncludeBase = '- system.linux.system.repo.mcp.apt_mirantis.'
- def updateRepoList = [ 'cassandra', 'ceph', 'contrail', 'docker', 'elastic', 'extra', 'openstack', 'percona', 'salt-formulas', 'saltstack', 'ubuntu' ]
+ def updateRepoList = ['cassandra', 'ceph', 'contrail', 'docker', 'elastic', 'extra', 'openstack', 'maas', 'percona', 'salt-formulas', 'saltstack', 'ubuntu']
updateRepoList.each { repo ->
def repoNameUpdateInclude = "${repoIncludeBase}update.${repo}"
def filesWithInclude = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && grep -Plr '\\${repoIncludeBase}${repo}\$' . || true", false).get('return')[0].values()[0].trim().tokenize('\n')
@@ -226,11 +437,12 @@
if (updateRepoIncludeExist == 'not_found') {
// Include needs to be added
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
- "sed -i 's/\\( *\\)${repoIncludeBase}${repo}\$/&\\n\\1${repoNameUpdateInclude}/g' ${file}")
+ "sed -i 's/\\( *\\)${repoIncludeBase}${repo}\$/&\\n\\1${repoNameUpdateInclude}/g' ${file}")
common.infoMsg("Update repo for ${repo} is added to ${file}")
}
}
}
+ wa32182(cluster_name)
// Add new defaults
common.infoMsg("Add new defaults")
salt.cmdRun(venvPepper, 'I@salt:master', "grep '^ mcp_version: ' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
@@ -247,121 +459,23 @@
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && " +
"git add -u && git commit --allow-empty -m 'Cluster model update to the release $targetMcpVersion on $dateTime'")
}
-
- salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'saltutil.refresh_pillar')
- try {
- salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
- } catch (Exception e) {
- common.errorMsg("Something wrong with model after UPDATE_CLUSTER_MODEL step. Please check model.")
- throw e
- }
-
- common.infoMsg('Running a check for compatibility with new Reclass/Salt-Formulas packages')
- def saltModelDir = 'salt-model'
- def nodesArtifact = 'pillarsFromValidation.tar.gz'
- def reclassModel = 'reclassModel.tar.gz'
- def pillarsAfterValidation = 'pillarsFromValidation'
- try {
- def repos = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:repo").get("return")[0].values()[0]
- def cfgInfo = salt.getPillar(venvPepper, 'I@salt:master', "reclass:storage:node:infra_cfg01_node").get("return")[0].values()[0]
- def docker_image_for_test = salt.getPillar(venvPepper, 'I@salt:master', "_param:docker_image_cvp_sanity_checks").get("return")[0].values()[0]
- def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
- def config = [
- 'dockerHostname': "cfg01",
- 'distribRevision': "${targetMcpVersion}",
- 'baseRepoPreConfig': true,
- 'extraRepoMergeStrategy': 'override',
- 'dockerContainerName': 'new-reclass-package-check',
- 'dockerMaxCpus': 1,
- 'image': docker_image_for_test,
- 'dockerExtraOpts': [
- "-v ${env.WORKSPACE}/${saltModelDir}:/srv/salt/reclass",
- "--entrypoint ''",
- ],
- 'extraRepos': ['repo': repos, 'aprConfD': "APT::Get::AllowUnauthenticated 'true';" ],
- 'envOpts': [ "CLUSTER_NAME=${cluster_name}", "NODES_ARTIFACT_NAME=${nodesArtifact}" ]
- ]
- def tarName = '/tmp/currentModel.tar.gz'
- salt.cmdRun(venvPepper, 'I@salt:master', "tar -cf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass classes")
- if (cfgInfo == '') {
- // case for old setups when cfg01 node model was static
- def node_name = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:name").get("return")[0].values()[0]
- def node_domain = salt.getPillar(venvPepper, 'I@salt:master', "linux:system:domain").get("return")[0].values()[0]
- salt.cmdRun(venvPepper, 'I@salt:master', "tar -rf ${tarName} --mode='a+rwX' --directory=/srv/salt/reclass nodes/${node_name}.${node_domain}.yml")
- config['envOpts'].add("CFG_NODE_NAME=${node_name}.${node_domain}")
- }
- def modelHash = salt.cmdRun(venvPepper, 'I@salt:master', "cat ${tarName} | gzip -9 -c | base64", false, null, false).get('return')[0].values()[0]
- writeFile file: 'modelHash', text: modelHash
- sh "cat modelHash | base64 -d | gzip -d > ${reclassModel}"
- sh "mkdir ${saltModelDir} && tar -xf ${reclassModel} -C ${saltModelDir}"
-
- config['runCommands'] = [
- '001_Install_Salt_Reclass_Packages': { sh('apt-get install -y reclass salt-formula-*') },
- '002_Get_new_nodes': {
- try {
- sh('''#!/bin/bash
- new_generated_dir=/srv/salt/_new_nodes
- new_pillar_dir=/srv/salt/_new_pillar
- reclass_classes=/srv/salt/reclass/classes/
- mkdir -p ${new_generated_dir} ${new_pillar_dir}
- nodegenerator -b ${reclass_classes} -o ${new_generated_dir} ${CLUSTER_NAME}
- for node in $(ls ${new_generated_dir}); do
- nodeName=$(basename -s .yml ${node})
- reclass -n ${nodeName} -c ${reclass_classes} -u ${new_generated_dir} > ${new_pillar_dir}/${nodeName}
- done
- if [[ -n "${CFG_NODE_NAME}" ]]; then
- reclass -n ${CFG_NODE_NAME} -c ${reclass_classes} -u /srv/salt/reclass/nodes > ${new_pillar_dir}/${CFG_NODE_NAME}
- fi
- tar -czf /tmp/${NODES_ARTIFACT_NAME} -C ${new_pillar_dir}/ .
- ''')
- } catch (Exception e) {
- print "Test new nodegenerator tool is failed: ${e}"
- throw e
- }
- },
- ]
- config['runFinally'] = [ '001_Archive_nodegenerator_artefact': {
- sh(script: "mv /tmp/${nodesArtifact} ${env.WORKSPACE}/${nodesArtifact}")
- archiveArtifacts artifacts: nodesArtifact
- }]
- saltModelTesting.setupDockerAndTest(config)
- def pillarsValidationDiff = "${pillarsAfterValidation}/diffFromOriginal"
- sh "mkdir -p ${pillarsValidationDiff} && tar -xf ${nodesArtifact} --dir ${pillarsAfterValidation}/"
- def changesFound = false
- for(String minion in minions) {
- try {
- sh (script:"diff -u -w -I '^Salt command execution success' -I '^ node: ' -I '^ uri: ' -I '^ timestamp: ' ${pillarsBeforeSuffix}/${minion} ${pillarsAfterValidation}/${minion} > ${pillarsValidationDiff}/${minion}", returnStdout: true)
- } catch(Exception e) {
- changesFound = true
- archiveArtifacts artifacts: "${pillarsValidationDiff}/${minion}"
- def buildUrl = env.BUILD_URL ? env.BUILD_URL : "${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_NUMBER}"
- common.errorMsg("Found diff changes for ${minion} minion: ${buildUrl}/artifact/${pillarsValidationDiff}/${minion}/*view*/ ")
- }
- }
- if (changesFound) {
- common.warningMsg('Found diff changes between current pillar data and updated. Inspect logs above.')
- input message: 'Continue anyway?'
- } else {
- common.infoMsg('Diff between current pillar data and updated one - not found.')
- }
- } catch (Exception updateErr) {
- common.warningMsg(updateErr)
- common.warningMsg('Failed to validate update Salt Formulas repos/packages.')
- input message: 'Continue anyway?'
- } finally {
- sh "rm -rf ${saltModelDir} ${nodesArtifact} ${pillarsAfterValidation} ${reclassModel}"
- }
-
try {
common.infoMsg('Perform: UPDATE Salt Formulas')
+ salt.fullRefresh(venvPepper, '*')
+ salt.enforceState(venvPepper, 'I@salt:master', 'linux.system.repo')
def saltEnv = salt.getPillar(venvPepper, 'I@salt:master', "_param:salt_master_base_environment").get("return")[0].values()[0]
- salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas",'salt.master.env'])
+ salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'state.sls_id', ["salt_master_${saltEnv}_pkg_formulas", 'salt.master.env'])
+ salt.fullRefresh(venvPepper, '*')
} catch (Exception updateErr) {
common.warningMsg(updateErr)
common.warningMsg('Failed to update Salt Formulas repos/packages. Check current available documentation on https://docs.mirantis.com/mcp/latest/, how to update packages.')
input message: 'Continue anyway?'
}
+ wa29352(minions, cluster_name)
+ def computeMinions = salt.getMinions(venvPepper, 'I@nova:compute')
+ wa29155(computeMinions, cluster_name)
+
try {
common.infoMsg('Perform: UPDATE Reclass package')
salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'pkg.install', ["reclass"])
@@ -381,21 +495,23 @@
}
salt.fullRefresh(venvPepper, '*')
-
try {
salt.cmdRun(venvPepper, 'I@salt:master', "reclass-salt --top")
}
catch (Exception ex) {
- error("Reclass fails rendering. Pay attention to your cluster model.")
+
+ error('Reclass fails rendering. Pay attention to your cluster model. ' +
+ 'ErrorMessage: ' + ex.toString())
}
+ common.infoMsg('Perform: archiveReclassInventory AFTER upgrade')
archiveReclassInventory(inventoryAfterFilename)
sh "diff -u $inventoryBeforeFilename $inventoryAfterFilename > reclass-inventory-diff.out || true"
archiveArtifacts artifacts: "reclass-inventory-diff.out"
- validateReclassModel(minions, pillarsAfterSuffix)
- archiveReclassModelChanges(minions, pillarsBeforeSuffix, pillarsAfterSuffix)
+ validateReclassModel(minions, 'after')
+ archiveReclassModelChanges(minions)
}
if (updateLocalRepos) {
@@ -437,37 +553,37 @@
}
}
- stage("Update Drivetrain") {
+ stage('Update Drivetrain') {
if (upgradeSaltStack) {
updateSaltStack("I@salt:master", '["salt-master", "salt-common", "salt-api", "salt-minion"]')
- salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
+ salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true, true, batchSize)
updateSaltStack("I@salt:minion and not I@salt:master", '["salt-minion"]')
}
if (updatePipelines) {
- triggerMirrorJob("git-mirror-downstream-mk-pipelines")
- triggerMirrorJob("git-mirror-downstream-pipeline-library")
+ common.infoMsg('Perform: UPDATE git repos')
+ triggerMirrorJob('git-mirror-downstream-mk-pipelines', reclassSystemBranch)
+ triggerMirrorJob('git-mirror-downstream-pipeline-library', reclassSystemBranch)
}
// update minions certs
- // call for `salt.minion.ca` state on related nodes to make sure
- // mine was updated with required data after salt-minion/salt-master restart salt:minion:ca
- salt.enforceState(venvPepper, "I@salt:minion:ca", 'salt.minion.ca', true)
- salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true)
- // run `salt.minion` to refresh all minion configs (for example _keystone.conf)
- salt.enforceState([saltId: venvPepper, target: "I@salt:minion ${extra_tgt}", state: ['salt.minion'], read_timeout: 60, retries: 2])
- // updating users and keys
- salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true)
- salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
+ salt.enforceState(venvPepper, "I@salt:minion", 'salt.minion.cert', true, true, batchSize)
- salt.enforceState(venvPepper, "I@jenkins:client", 'jenkins.client', true)
+ // Retry needed only for rare race-condition in user appearance
+ common.infoMsg('Perform: updating users and keys')
+ salt.enforceState(venvPepper, "I@linux:system", 'linux.system.user', true, true, batchSize)
+ common.infoMsg('Perform: updating openssh')
+ salt.enforceState(venvPepper, "I@linux:system", 'openssh', true, true, batchSize)
- salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client' state.sls docker.client --async")
+ // Apply changes for HaProxy on CI/CD nodes
+ salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+
+ salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
sleep(180)
- common.infoMsg("Checking if Docker containers are up")
+ common.infoMsg('Perform: Checking if Docker containers are up')
try {
common.retry(10, 30) {
@@ -477,6 +593,13 @@
catch (Exception ex) {
error("Docker containers for CI/CD services are having troubles with starting.")
}
+
+ salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
+
+ // update Nginx proxy settings for Jenkins/Gerrit if needed
+ if (salt.testTarget(venvPepper, 'I@nginx:server:site:nginx_proxy_jenkins and I@nginx:server:site:nginx_proxy_gerrit')) {
+ salt.enforceState(venvPepper, 'I@nginx:server:site:nginx_proxy_jenkins and I@nginx:server:site:nginx_proxy_gerrit', 'nginx.server', true, true, null, false, 60, 2)
+ }
}
}
catch (Throwable e) {
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 930a27d..962c4ed 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -36,6 +36,9 @@
*
* PARALLEL_PERFORMANCE If enabled, run Rally tests separately in parallel for each sub directory found
* inside RALLY_SCENARIOS and RALLY_SL_SCENARIOS (if STACKLIGHT_RALLY is enabled)
+ * GENERATE_REPORT Set this to false if you are running longevity tests on a cicd node with less than
+ * 21GB memory. Rally consumes lots of memory when generating reports that source weeks'
+ * worth of data (BUG PROD-30433)
*/
common = new com.mirantis.mk.Common()
@@ -56,6 +59,7 @@
def pluginsRepo = rally.get('RALLY_PLUGINS_REPO') ?: 'https://github.com/Mirantis/rally-plugins'
def pluginsBranch = rally.get('RALLY_PLUGINS_BRANCH') ?: 'master'
def tags = rally.get('RALLY_TAGS') ?: []
+def generateReport = rally.get('GENERATE_REPORT', true).toBoolean()
// contrainer working dir vars
def rallyWorkdir = '/home/rally'
@@ -194,7 +198,7 @@
platform, rally.RALLY_SCENARIOS,
rally.RALLY_SL_SCENARIOS, rally.RALLY_TASK_ARGS_FILE,
rally.RALLY_DB_CONN_STRING, tags,
- rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+ rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST, generateReport
)
def commands_list = commands.collectEntries{ [ (it.key) : { sh("${it.value}") } ] }
@@ -301,7 +305,8 @@
curPlatform, commonScens,
stacklightScens, rally.RALLY_TASK_ARGS_FILE,
rally.RALLY_DB_CONN_STRING, tags,
- rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST
+ rally.RALLY_TRENDS.toBoolean(), rally.SKIP_LIST,
+ generateReport
)
// copy required files for the current task