Merge "Updated docstring"
diff --git a/cloud-update.groovy b/cloud-update.groovy
index a8b2c71..f383c24 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -10,9 +10,12 @@
* PER_NODE Target nodes will be managed one by one (bool)
* ROLLBACK_BY_REDEPLOY Omit taking live snapshots. Rollback is planned to be done by redeployment (bool)
* STOP_SERVICES Stop API services before update (bool)
+ * TARGET_KERNEL_UPDATES Comma separated list of nodes to update the kernel on if a newer version is available (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
+ * TARGET_REBOOT Comma separated list of nodes to reboot after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
+ * TARGET_HIGHSTATE Comma separated list of nodes to run Salt Highstate on after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
* TARGET_UPDATES Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_ROLLBACKS Comma separated list of nodes to update (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
- * TARGET_MERGES Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
+ * TARGET_ROLLBACKS Comma separated list of nodes to rollback (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
+ * TARGET_SNAPSHOT_MERGES Comma separated list of nodes to merge live snapshot for (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
* CTL_TARGET Salt targeted CTL nodes (ex. ctl*)
* PRX_TARGET Salt targeted PRX nodes (ex. prx*)
* MSG_TARGET Salt targeted MSG nodes (ex. msg*)
@@ -28,11 +31,10 @@
* CMP_TARGET Salt targeted physical compute nodes (ex. cmp001*)
* KVM_TARGET Salt targeted physical KVM nodes (ex. kvm01*)
* CEPH_OSD_TARGET Salt targeted physical Ceph OSD nodes (ex. osd001*)
- * GTW_TARGET Salt targeted physical GTW nodes (ex. gtw01*)
- * REBOOT Reboot nodes after update (bool)
- * ROLLBACK_PKG_VERSIONS Space separated list of pkgs=versions to rollback to (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
- * PURGE_PKGS Space separated list of pkgs=versions to be purged (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
- * REMOVE_PKGS Space separated list of pkgs=versions to be removed (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ * GTW_TARGET Salt targeted physical or virtual GTW nodes (ex. gtw01*)
+ * ROLLBACK_PKG_VERSIONS Space separated list of pkgs=versions to rollback to on targeted physical machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ * PURGE_PKGS Space separated list of pkgs=versions to be purged on targeted physical machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ * REMOVE_PKGS Space separated list of pkgs=versions to be removed on targeted physical machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
* RESTORE_GALERA Restore Galera DB (bool)
* RESTORE_CONTRAIL_DB Restore Cassandra and Zookeeper DBs for OpenContrail (bool)
*
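
The three new TARGET_* parameters are consumed exactly like TARGET_UPDATES: the value is split on commas and each token is trimmed, so stray whitespace such as 'cmp, kvm' is tolerated. A hypothetical parameter set (example values only, not part of the change) could look like:

    TARGET_UPDATES        = 'cfg,ctl,cmp'
    TARGET_KERNEL_UPDATES = 'cmp,kvm,osd'
    TARGET_REBOOT         = 'cmp,kvm'
    TARGET_HIGHSTATE      = 'cfg,ctl'
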
@@ -44,7 +46,7 @@
def updates = TARGET_UPDATES.tokenize(",").collect{it -> it.trim()}
def rollbacks = TARGET_ROLLBACKS.tokenize(",").collect{it -> it.trim()}
-def merges = TARGET_MERGES.tokenize(",").collect{it -> it.trim()}
+def merges = TARGET_SNAPSHOT_MERGES.tokenize(",").collect{it -> it.trim()}
def pepperEnv = "pepperEnv"
def minions
@@ -56,8 +58,9 @@
def updatePkgs(pepperEnv, target, targetType="", targetPackages="") {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
+ def kernelUpdates = TARGET_KERNEL_UPDATES.tokenize(",").collect{it -> it.trim()}
+ def distUpgrade = false
def commandKwargs
- def distUpgrade
def pkgs
def out
@@ -65,7 +68,11 @@
stage("List package upgrades") {
common.infoMsg("Listing all the packages that have a new update available on ${target}")
- pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', [], null, true))
+ if (kernelUpdates.contains(targetType)) {
+ pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', [], null, true))
+ } else {
+ pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', ['dist_upgrade=False'], null, true))
+ }
if(targetPackages != "" && targetPackages != "*"){
common.infoMsg("Note that only the ${targetPackages} would be installed from the above list of available updates on the ${target}")
}
@@ -91,27 +98,29 @@
if (targetPackages != "") {
// list installed versions of pkgs that will be upgraded
- def installedPkgs = []
- def newPkgs = []
- def targetPkgList = targetPackages.tokenize(',')
- for (pkg in targetPkgList) {
- def version
- try {
- def pkgsDetails = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.info_installed', [pkg], null, true))
- version = pkgsDetails.get(pkg).get('version')
- } catch (Exception er) {
- common.infoMsg("${pkg} not installed yet")
+ if (targetType == 'kvm' || targetType == 'cmp' || targetType == 'osd' || targetType == 'gtw-physical') {
+ def installedPkgs = []
+ def newPkgs = []
+ def targetPkgList = targetPackages.tokenize(',')
+ for (pkg in targetPkgList) {
+ def version
+ try {
+ def pkgsDetails = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.info_installed', [pkg], null, true))
+ version = pkgsDetails.get(pkg).get('version')
+ } catch (Exception er) {
+ common.infoMsg("${pkg} not installed yet")
+ }
+ if (version?.trim()) {
+ installedPkgs.add(pkg + '=' + version)
+ } else {
+ newPkgs.add(pkg)
+ }
}
- if (version?.trim()) {
- installedPkgs.add(pkg + '=' + version)
- } else {
- newPkgs.add(pkg)
- }
+ common.warningMsg("the following list of pkgs will be upgraded")
+ common.warningMsg(installedPkgs.join(" "))
+ common.warningMsg("the following list of pkgs will be newly installed")
+ common.warningMsg(newPkgs.join(" "))
}
- common.warningMsg("the following list of pkgs will be upgraded")
- common.warningMsg(installedPkgs.join(" "))
- common.warningMsg("the following list of pkgs will be newly installed")
- common.warningMsg(newPkgs.join(" "))
// set variables
command = "pkg.install"
packages = targetPackages
@@ -119,15 +128,16 @@
}else {
command = "pkg.upgrade"
- commandKwargs = ['dist_upgrade': 'true']
- distUpgrade = true
+ if (kernelUpdates.contains(targetType)) {
+ commandKwargs = ['dist_upgrade': 'true']
+ distUpgrade = true
+ }
packages = null
}
- // todo exception to cfg or cicd
stage("stop services on ${target}") {
- if ((STOP_SERVICES.toBoolean()) && (targetType != 'cicd')) {
- if (targetType == 'contrail') {
+ if ((STOP_SERVICES.toBoolean()) && (targetType != 'cid')) {
+ if (targetType == 'ntw' || targetType == 'nal') {
stopContrailServices(pepperEnv, target)
} else {
def probe = salt.getFirstMinion(pepperEnv, "${target}")
@@ -138,7 +148,7 @@
stage('Apply package upgrades') {
// salt master pkg
- if (targetType == 'I@salt:master') {
+ if (targetType == 'cfg') {
common.warningMsg('salt-master pkg upgrade, rerun the pipeline if disconnected')
salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-master'], null, true, 5)
salt.minionsReachable(pepperEnv, 'I@salt:master', '*')
@@ -453,26 +463,30 @@
}
}
-def highstate(pepperEnv, target) {
+def highstate(pepperEnv, target, type) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
-
- stage("Apply highstate on ${target} nodes") {
- try {
- common.retry(3){
- //salt.enforceHighstate(pepperEnv, target)
- }
- } catch (Exception e) {
- common.errorMsg(e)
- if (INTERACTIVE.toBoolean()) {
- input message: "Highstate failed on ${target}. Fix it manually or run rollback on ${target}."
- } else {
- throw new Exception("highstate failed")
+ def highstates = TARGET_HIGHSTATE.tokenize(",").collect{it -> it.trim()}
+ def reboots = TARGET_REBOOT.tokenize(",").collect{it -> it.trim()}
+ // optionally run highstate
+ if (highstates.contains(type)) {
+ stage("Apply highstate on ${target} nodes") {
+ try {
+ common.retry(3){
+ salt.enforceHighstate(pepperEnv, target)
+ }
+ } catch (Exception e) {
+ common.errorMsg(e)
+ if (INTERACTIVE.toBoolean()) {
+ input message: "Highstate failed on ${target}. Fix it manually or run rollback on ${target}."
+ } else {
+ throw new Exception("highstate failed")
+ }
}
}
}
// optionally reboot
- if (REBOOT.toBoolean()) {
+ if (reboots.contains(type)) {
stage("Reboot ${target} nodes") {
salt.runSaltProcessStep(pepperEnv, target, 'system.reboot', null, null, true, 5)
sleep(10)
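
A usage sketch of the reworked helper (hypothetical values, assuming TARGET_HIGHSTATE='ctl' and TARGET_REBOOT='cmp'): the global REBOOT flag is replaced by per-type lists, so highstates and reboots only run for the node types that opt in.

    highstate(pepperEnv, 'ctl01*', 'ctl')    // runs the highstate stage, no reboot
    highstate(pepperEnv, 'cmp001*', 'cmp')   // skips the highstate, reboots the node
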
@@ -539,6 +553,7 @@
}
nodeCount++
}
+ salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
}
@@ -591,7 +606,7 @@
def target = salt.stripDomainName(t)
def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.undefine', ["${target}.${domain}"], null, true)
+ //salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.undefine', ["${target}.${domain}"], null, true)
try {
salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
} catch (Exception e) {
@@ -601,6 +616,12 @@
}
}
+def saltMasterBackup(pepperEnv) {
+ def salt = new com.mirantis.mk.Salt()
+ salt.enforceState(pepperEnv, 'I@salt:master', 'backupninja')
+ salt.cmdRun(pepperEnv, 'I@salt:master', "su root -c 'backupninja -n --run /etc/backup.d/200.backup.rsync'")
+}
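
A usage sketch for the new helper (the actual call site is in the cfg update section below); it assumes the backupninja state and the /etc/backup.d/200.backup.rsync handler exist in the deployment model:

    if (ROLLBACK_BY_REDEPLOY.toBoolean()) {
        // no live snapshot will be taken, so keep an rsync backup of the Salt master instead
        saltMasterBackup(pepperEnv)
    }
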
+
def backupCeph(pepperEnv, tgt) {
def salt = new com.mirantis.mk.Salt()
salt.enforceState(pepperEnv, 'I@ceph:backup:server', 'ceph.backup')
@@ -702,24 +723,43 @@
]
}
-def verifyAPIs(pepperEnv) {
+def verifyAPIs(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
+ def out = salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+ if (out.toString().toLowerCase().contains('error')) {
+ common.errorMsg(out)
+ if (INTERACTIVE.toBoolean()) {
+ input message: "APIs are not working as expected. Please fix it manually."
+ } else {
+ throw new Exception("APIs are not working as expected")
+ }
+ }
}
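
A minimal usage sketch (the target value is hypothetical): the check now fails the run instead of only printing the CLI output.

    verifyAPIs(pepperEnv, 'ctl*')   // pauses for input (interactive) or throws if 'error' appears in the output
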
-def verifyGalera(pepperEnv) {
+def verifyGalera(pepperEnv, target, count=0, maxRetries=200) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def out = salt.getReturnValues(salt.cmdRun(pepperEnv, 'I@galera:master', 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
-
- if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
- if (INTERACTIVE.toBoolean()) {
- input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+ def out
+ while(count < maxRetries) {
+ try {
+ out = salt.getReturnValues(salt.cmdRun(pepperEnv, target, 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
+ } catch (Exception er) {
+ common.infoMsg(er)
+ }
+ if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
+ count++
+ if (count == maxRetries) {
+ if (INTERACTIVE.toBoolean()) {
+ input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+ } else {
+ common.errorMsg(out)
+ throw new Exception("Galera is not working as expected")
+ }
+ }
+ sleep(time: 500, unit: 'MILLISECONDS')
} else {
- common.errorMsg(out)
- throw new Exception("Galera is not working as expected")
+ break
}
}
}
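
A usage sketch of the new retry behaviour: the check polls wsrep_cluster_size up to maxRetries times with a 500 ms pause between attempts, i.e. roughly 100 seconds with the defaults before it gives up.

    verifyGalera(pepperEnv, DBS_TARGET)          // default budget of 200 attempts
    verifyGalera(pepperEnv, DBS_TARGET, 0, 20)   // hypothetical shorter budget for a quick check
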
@@ -818,41 +858,44 @@
*/
if (updates.contains("cfg")) {
def target = 'I@salt:master'
+ def type = 'cfg'
if (salt.testTarget(pepperEnv, target)) {
def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
getCfgNodeProvider(pepperEnv, master)
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
virsh.liveSnapshotPresent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
+ } else {
+ saltMasterBackup(pepperEnv)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t, target)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("ctl")) {
def target = CTL_TARGET
+ def type = 'ctl'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'ctl'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyAPIs(pepperEnv, target)
}
@@ -860,20 +903,20 @@
if (updates.contains("prx")) {
def target = PRX_TARGET
+ def type = 'prx'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'prx'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'nginx')
}
@@ -881,20 +924,20 @@
if (updates.contains("msg")) {
def target = MSG_TARGET
+ def type = 'msg'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'msg'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'rabbitmq-server')
}
@@ -902,47 +945,45 @@
if (updates.contains("dbs")) {
def target = DBS_TARGET
+ def type = 'dbs'
if (salt.testTarget(pepperEnv, target)) {
backupGalera(pepperEnv)
- salt.runSaltProcessStep(pepperEnv, target, 'service.stop', ['keepalived'], null, true)
- salt.runSaltProcessStep(pepperEnv, target, 'service.stop', ['haproxy'], null, true)
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'dbs'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
- if (REBOOT.toBoolean()) {
+ if (REBOOT.toBoolean() || PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- // one by one update
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
+ verifyGalera(pepperEnv, t)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
+ verifyGalera(pepperEnv, target)
}
- verifyGalera(pepperEnv)
}
}
if (updates.contains("ntw")) {
def target = NTW_TARGET
+ def type = 'ntw'
if (salt.testTarget(pepperEnv, target)) {
backupContrail(pepperEnv)
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'ntw'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t, 'contrail')
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
verifyContrail(pepperEnv, t)
}
} else {
- updatePkgs(pepperEnv, target, 'contrail')
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
verifyContrail(pepperEnv, target)
}
}
@@ -950,20 +991,20 @@
if (updates.contains("nal")) {
def target = NAL_TARGET
+ def type = 'nal'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'nal'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t, 'contrail')
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target, 'contrail')
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyContrail(pepperEnv, target)
}
@@ -971,20 +1012,20 @@
if (updates.contains("gtw-virtual")) {
def target = GTW_TARGET
+ def type = 'gtw-virtual'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'gtw'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'neutron-dhcp-agent')
}
@@ -992,22 +1033,22 @@
if (updates.contains("cmn")) {
def target = CMN_TARGET
+ def type = 'cmn'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'cmn'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
} else {
- backupCeph(pepperEnv)
+ backupCeph(pepperEnv, target)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyCeph(pepperEnv, target, 'mon@')
}
@@ -1015,20 +1056,20 @@
if (updates.contains("rgw")) {
def target = RGW_TARGET
+ def type = 'rgw'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'rgw'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyCeph(pepperEnv, target, 'radosgw@rgw.')
}
@@ -1036,73 +1077,73 @@
if (updates.contains("log")) {
def target = LOG_TARGET
+ def type = 'log'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'log'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("mon")) {
def target = MON_TARGET
+ def type = 'mon'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'mon'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("mtr")) {
def target = MTR_TARGET
+ def type = 'mtr'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'mtr'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("cid")) {
def target = CID_TARGET
+ def type = 'cid'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'cid'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
- updatePkgs(pepperEnv, target, 'cicd')
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
verifyService(pepperEnv, target, 'docker')
}
}
@@ -1112,16 +1153,17 @@
//
if (updates.contains("cmp")) {
def target = CMP_TARGET
+ def type = 'cmp'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'nova-compute')
}
@@ -1129,16 +1171,17 @@
if (updates.contains("kvm")) {
def target = KVM_TARGET
+ def type = 'kvm'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'libvirt-bin')
}
@@ -1146,16 +1189,17 @@
if (updates.contains("osd")) {
def target = CEPH_OSD_TARGET
+ def type = 'osd'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyCephOsds(pepperEnv, target)
}
@@ -1163,16 +1207,17 @@
if (updates.contains("gtw-physical")) {
def target = GTW_TARGET
+ def type = 'gtw-physical'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'neutron-dhcp-agent')
}
@@ -1181,7 +1226,7 @@
/*
* Rollback section
*/
- /* if (rollbacks.contains("ctl")) {
+ /* if (rollbacks.contains("cfg")) {
if (salt.testTarget(pepperEnv, 'I@salt:master')) {
stage('ROLLBACK_CFG') {
input message: "To rollback CFG nodes run the following commands on kvm nodes hosting the CFG nodes: virsh destroy cfg0X.domain; virsh define /var/lib/libvirt/images/cfg0X.domain.xml; virsh start cfg0X.domain; virsh snapshot-delete cfg0X.domain --metadata ${SNAPSHOT_NAME}; rm /var/lib/libvirt/images/cfg0X.domain.${SNAPSHOT_NAME}.qcow2; rm /var/lib/libvirt/images/cfg0X.domain.xml; At the end restart 'docker' service on all cicd nodes and run 'linux.system.repo' Salt states on cicd nodes. After running the previous commands current pipeline job will be killed."
@@ -1234,7 +1279,7 @@
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
rollback(pepperEnv, target, 'dbs')
clusterGalera(pepperEnv)
- verifyGalera(pepperEnv)
+ verifyGalera(pepperEnv, target)
} else {
removeNode(pepperEnv, target, 'dbs')
}
@@ -1348,16 +1393,17 @@
//
if (rollbacks.contains("cmp")) {
def target = CMP_TARGET
+ def type = 'cmp'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'nova-compute')
}
@@ -1365,16 +1411,17 @@
if (rollbacks.contains("kvm")) {
def target = KVM_TARGET
+ def type = 'kvm'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'libvirt-bin')
}
@@ -1382,16 +1429,17 @@
if (rollbacks.contains("osd")) {
def target = CEPH_OSD_TARGET
+ def type = 'osd'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyCephOsds(pepperEnv, target)
}
@@ -1399,16 +1447,17 @@
if (rollbacks.contains("gtw-physical")) {
def target = GTW_TARGET
+ def type = 'gtw-physical'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'neutron-dhcp-agent')
}
@@ -1426,54 +1475,66 @@
if (merges.contains("ctl")) {
if (salt.testTarget(pepperEnv, CTL_TARGET)) {
mergeSnapshot(pepperEnv, CTL_TARGET, 'ctl')
+ verifyService(pepperEnv, CTL_TARGET, 'nova-api')
}
}
if (merges.contains("prx")) {
if (salt.testTarget(pepperEnv, PRX_TARGET)) {
mergeSnapshot(pepperEnv, PRX_TARGET, 'prx')
+ verifyService(pepperEnv, PRX_TARGET, 'nginx')
}
}
if (merges.contains("msg")) {
if (salt.testTarget(pepperEnv, MSG_TARGET)) {
mergeSnapshot(pepperEnv, MSG_TARGET, 'msg')
+ verifyService(pepperEnv, MSG_TARGET, 'rabbitmq-server')
}
}
if (merges.contains("dbs")) {
if (salt.testTarget(pepperEnv, DBS_TARGET)) {
mergeSnapshot(pepperEnv, DBS_TARGET, 'dbs')
+ verifyGalera(pepperEnv, DBS_TARGET)
+ backupGalera(pepperEnv)
}
}
if (merges.contains("ntw")) {
if (salt.testTarget(pepperEnv, NTW_TARGET)) {
mergeSnapshot(pepperEnv, NTW_TARGET, 'ntw')
+ verifyContrail(pepperEnv, NTW_TARGET)
+ backupContrail(pepperEnv)
}
}
if (merges.contains("nal")) {
if (salt.testTarget(pepperEnv, NAL_TARGET)) {
mergeSnapshot(pepperEnv, NAL_TARGET, 'nal')
+ verifyContrail(pepperEnv, NAL_TARGET)
}
}
if (merges.contains("gtw-virtual")) {
if (salt.testTarget(pepperEnv, GTW_TARGET)) {
mergeSnapshot(pepperEnv, GTW_TARGET, 'gtw')
+ verifyService(pepperEnv, GTW_TARGET, 'neutron-dhcp-agent')
}
}
if (merges.contains("cmn")) {
if (salt.testTarget(pepperEnv, CMN_TARGET)) {
mergeSnapshot(pepperEnv, CMN_TARGET, 'cmn')
+ verifyCeph(pepperEnv, CMN_TARGET, 'mon@')
+ backupCeph(pepperEnv, CMN_TARGET)
}
}
if (merges.contains("rgw")) {
if (salt.testTarget(pepperEnv, RGW_TARGET)) {
mergeSnapshot(pepperEnv, RGW_TARGET, 'rgw')
+ verifyCeph(pepperEnv, RGW_TARGET, 'radosgw@rgw.')
}
}
@@ -1498,15 +1559,18 @@
if (merges.contains("cid")) {
if (salt.testTarget(pepperEnv, CID_TARGET)) {
mergeSnapshot(pepperEnv, CID_TARGET, 'cid')
+ verifyService(pepperEnv, CID_TARGET, 'docker')
}
}
if (RESTORE_GALERA.toBoolean()) {
restoreGalera(pepperEnv)
+ verifyGalera(pepperEnv, DBS_TARGET)
}
if (RESTORE_CONTRAIL_DB.toBoolean()) {
restoreContrailDb(pepperEnv)
+ // verification is already present in restore pipelines
}
} catch (Throwable e) {
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 4952502..4bb5018 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -29,6 +29,7 @@
def artifacts_dir = 'validation_artifacts/'
def remote_artifacts_dir = '/root/qa_results/'
def current_target_node = ''
+def first_node = ''
def tempest_result = ''
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -61,6 +62,7 @@
current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
common.warningMsg("Shutdown current vip node ${current_target_node}")
validate.shutdown_vm_node(saltMaster, current_target_node, 'soft_shutdown')
+ sleep 15
}
stage('Check during shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_shutdown")
@@ -79,6 +81,9 @@
if (status == null) {
throw new Exception("Node ${current_target_node} cannot start")
}
+ first_node = current_target_node
+ current_target_node = ''
+ sleep 30
}
stage('Check after shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_shutdown")
@@ -96,10 +101,12 @@
input message: "Are you sure you want to hard shutdown current vip node?"
}
}
- salt.cmdRun(saltMaster, current_target_node, "service keepalived stop")
+ salt.cmdRun(saltMaster, first_node, "service keepalived stop")
current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
common.warningMsg("Shutdown current vip node ${current_target_node}")
validate.shutdown_vm_node(saltMaster, current_target_node, 'hard_shutdown')
+ sleep 10
+ salt.cmdRun(saltMaster, first_node, "service keepalived start")
}
stage('Check during hard shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_hard_shutdown")
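
The added first_node bookkeeping makes the hard-shutdown stage deterministic; a sketch of the intended sequence (node names hypothetical):

    // 1. soft shutdown of the current VIP node, e.g. ctl01, then remember it as first_node
    // 2. stop keepalived on first_node so the VIP cannot fail back to it
    // 3. hard shutdown of whichever node now holds the VIP, e.g. ctl02
    // 4. start keepalived on first_node again once the hard shutdown has been triggered
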
@@ -116,9 +123,10 @@
common.infoMsg("Checking that node is UP")
status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
if (status == null) {
- throw new Exception("Command execution failed")
+ throw new Exception("Node ${current_target_node} cannot start")
}
- salt.cmdRun(saltMaster, TARGET_NODES, "service keepalived start")
+ current_target_node = ''
+ sleep 30
}
stage('Check after hard shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_hard_shutdown")
@@ -127,7 +135,7 @@
currentBuild.result = "FAILURE"
throw new Exception("Tempest tests failed")
}
- sleep 15
+ sleep 5
}
stage('Reboot') {
@@ -148,6 +156,7 @@
currentBuild.result = "FAILURE"
throw new Exception("Tempest tests failed")
}
+ sleep 30
}
stage('Check after reboot') {
common.warningMsg("Checking that node is UP")
@@ -175,6 +184,11 @@
if (DEBUG_MODE == 'false') {
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
+ if (current_target_node != '') {
+ common.warningMsg("Powering on node ${current_target_node}")
+ kvm = validate.locate_node_on_kvm(saltMaster, current_target_node)
+ salt.cmdRun(saltMaster, kvm, "virsh start ${current_target_node}")
+ }
}
}
}
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
new file mode 100644
index 0000000..dd58da5
--- /dev/null
+++ b/cvp-runner.groovy
@@ -0,0 +1,39 @@
+/**
+ *
+ * Launch pytest frameworks in Jenkins
+ *
+ * Expected parameters:
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ *
+ *   TESTS_SET                 Leave empty for a full run or choose a single test file
+ * TESTS_REPO Repo to clone
+ *   TESTS_SETTINGS            Additional environment variables to apply
+ * PROXY Proxy to use for cloning repo or for pip
+ *
+ */
+
+validate = new com.mirantis.mcp.Validate()
+
+def artifacts_dir = 'validation_artifacts/'
+
+node() {
+ try{
+ stage('Initialization') {
+ validate.prepareVenv(TESTS_REPO, PROXY)
+ }
+
+ stage('Run Tests') {
+ sh "mkdir -p ${artifacts_dir}"
+ validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
+ }
+ stage ('Publish results') {
+ archiveArtifacts artifacts: "${artifacts_dir}/*"
+ junit "${artifacts_dir}/*.xml"
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+}
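
A sketch of what the publish stage expects (an assumption based on the archiveArtifacts and junit calls above, not spelled out in the change): runTests has to leave JUnit XML plus any logs under validation_artifacts/, for example:

    validation_artifacts/
        cvp_sanity.xml    // hypothetical name; every *.xml file is fed to the junit step
        pytest.log        // archived as a plain build artifact
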
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 7b9054a..7602dcf 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -98,9 +98,15 @@
}
}
- stage("Install monitoring") {
+ stage("Update/Install monitoring") {
+ // Collect grains
+ salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains')
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules')
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update')
+ sleep(5)
+
salt.enforceState(pepperEnv, targetLiveAll, 'prometheus')
- salt.enforceState(pepperEnv, 'I@prometheus', 'prometheus')
+ salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
}
} catch (Throwable e) {