Merge "Extended docker-build pipeline to add uploads to artifactory"
diff --git a/aptly-add-packages.groovy b/aptly-add-packages.groovy
new file mode 100644
index 0000000..2d117fc
--- /dev/null
+++ b/aptly-add-packages.groovy
@@ -0,0 +1,67 @@
+/**
+ *
+ * Aptly add packages pipeline
+ *
+ * Expected parameters:
+ * APTLY_API_URL - URL of Aptly API, example: http://10.1.0.12:8080
+ * APTLY_REPO - Name of the aptly repo to put packages into
+ * CLEANUP_REPO - Option to clean up repo contents after publish
+ * PROMOTE - Option to promote the publish
+ * PROMOTE_COMPONENT - Component to promote (only used when PROMOTE = True)
+ * PROMOTE_SOURCE - Source regex to promote from (only used when PROMOTE = True)
+ * PROMOTE_TARGET - Target regex to promote to (only used when PROMOTE = True)
+ * PROMOTE_STORAGES - Storages to promote (only used when PROMOTE = True)
+ *
+ */
+
+// Load shared libs
+aptly = new com.mirantis.mk.Aptly()
+common = new com.mirantis.mk.Common()
+
+timeout(time: 12, unit: 'HOURS') {
+ node("docker&&hardware") {
+ try {
+ def aptlyServer = ["url": APTLY_API_URL]
+ def workspace = common.getWorkspace()
+ def actualTime = (System.currentTimeMillis()/1000).toInteger()
+ def snapshotName = APTLY_REPO + "-" + actualTime
+
+ lock("aptly-api") {
+ stage("upload") {
+ def inputFile = input message: 'Upload file', parameters: [file(name: 'packages.tar.gz')]
+ new hudson.FilePath(new File("$workspace/packages.tar.gz")).copyFrom(inputFile)
+ inputFile.delete()
+
+ sh "mkdir ${workspace}/packages;tar -xvzf ${workspace}/packages.tar.gz --directory ${workspace}/packages"
+
+ def packages = sh(script: "ls -1a ${workspace}/packages | tail -n +3", returnStdout: true) // tail -n +3 skips the "." and ".." entries
+ packages = packages.tokenize("\n")
+ for(pkg in packages){
+ aptly.uploadPackage("${workspace}/packages/${pkg}", APTLY_API_URL, APTLY_REPO)
+ }
+ }
+
+ stage("publish") {
+ aptly.snapshotCreateByAPI(aptlyServer, APTLY_REPO, snapshotName)
+ aptly.publish(APTLY_API_URL)
+
+ if(PROMOTE.toBoolean()){
+ aptly.promotePublish(APTLY_API_URL, PROMOTE_SOURCE, PROMOTE_TARGET, false, PROMOTE_COMPONENT, null, false, '-d --timeout 600', false, PROMOTE_STORAGES)
+ }
+
+ if(CLEANUP_REPO.toBoolean()){
+ def packageList = aptly.listPackagesFromRepoByAPI(aptlyServer, APTLY_REPO)
+ aptly.deletePackagesFromRepoByAPI(aptlyServer, APTLY_REPO, packageList)
+ }
+ }
+ }
+ sh "rm -rf ${workspace}/*"
+ }
+ catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ sh "rm -rf ${workspace}/*"
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+ }
+}
\ No newline at end of file
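Note: the pipeline above is fully parameter-driven. A minimal sketch of triggering it from another pipeline, assuming a hypothetical job name "aptly-add-packages" and illustrative parameter values (the parameter names come from the header comment):

    // Hypothetical downstream trigger; the job name and values are assumptions.
    build job: "aptly-add-packages", parameters: [
        [$class: 'StringParameterValue', name: 'APTLY_API_URL', value: 'http://10.1.0.12:8080'],
        [$class: 'StringParameterValue', name: 'APTLY_REPO', value: 'testing'],
        [$class: 'BooleanParameterValue', name: 'CLEANUP_REPO', value: false],
        [$class: 'BooleanParameterValue', name: 'PROMOTE', value: false]
    ]

Since the upload stage uses an input step with a file parameter, a run still pauses until packages.tar.gz is supplied manually.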
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 92d61e0..33a5a67 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -74,7 +74,15 @@
}
}
- salt.enforceState(pepperEnv, HOST, 'prometheus')
- salt.enforceState(pepperEnv, 'I@prometheus', 'prometheus')
+ stage("Update/Install monitoring") {
+ //Collect Grains
+ salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
+ sleep(5)
+
+ salt.enforceState(pepperEnv, HOST, 'prometheus')
+ salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+ }
}
}
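Note: the new stage re-collects grains and refreshes the Salt mine before applying prometheus, so that Prometheus target discovery picks up the new node. A minimal sketch of that sequence as a reusable helper (hypothetical name, using the same com.mirantis.mk.Salt API as above):

    // Hypothetical helper wrapping the grains/mine refresh pattern used above.
    def refreshGrainsAndMine(pepperEnv, target) {
        def salt = new com.mirantis.mk.Salt()
        salt.enforceState(pepperEnv, target, 'salt.minion.grains')       // re-render grains
        salt.runSaltProcessStep(pepperEnv, target, 'saltutil.refresh_modules')
        salt.runSaltProcessStep(pepperEnv, target, 'mine.update')        // publish refreshed grains to the mine
        sleep(5)                                                         // give the mine time to settle
    }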
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index c2f9351..66e4bc7 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -201,6 +201,13 @@
envParams.put('cluster_name', STACK_CLUSTER_NAME)
}
+ if (common.validInputParam('STACK_COMPUTE_COUNT')) {
+ if (STACK_COMPUTE_COUNT.toInteger() > 0){
+ common.infoMsg("Setting cluster_node_count to ${STACK_COMPUTE_COUNT}")
+ envParams.put('cluster_node_count', STACK_COMPUTE_COUNT)
+ }
+ }
+
openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
}
@@ -580,10 +587,17 @@
stage('Finalize') {
if (common.checkContains('STACK_INSTALL', 'finalize')) {
def gluster_compound = 'I@glusterfs:server'
+ def salt_ca_compound = 'I@salt:minion:ca:salt_master_ca'
// Enforce highstate asynchronous only on the nodes which are not glusterfs servers
- salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound)
+ salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound + ' and not ' + salt_ca_compound)
// Iterate over nonempty set of gluster servers and apply highstates one by one
// TODO: switch to batch once salt 2017.7+ would be used
+ def saltcaMinions = salt.getMinionsSorted(venvPepper, salt_ca_compound)
+ if ( !saltcaMinions.isEmpty() ) {
+ for ( target in saltcaMinions ) {
+ salt.enforceHighstate(venvPepper, target)
+ }
+ }
def glusterMinions = salt.getMinionsSorted(venvPepper, gluster_compound)
if ( !glusterMinions.isEmpty() ) {
for ( target in glusterMinions ) {
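Note: the finalize stage now excludes both glusterfs servers and salt-master-CA minions from the bulk highstate and applies each excluded minion serially. A minimal sketch of the underlying pattern, assuming a hypothetical helper built on the same Salt calls:

    // Sketch: run highstate everywhere except the excluded compounds,
    // then apply the excluded minions one by one.
    def applyHighstateWithExclusions(venvPepper, salt, exclusions) {
        def notClause = exclusions.collect { ' and not ' + it }.join('')
        salt.enforceHighstate(venvPepper, '*' + notClause)
        for (compound in exclusions) {
            for (target in salt.getMinionsSorted(venvPepper, compound)) {
                salt.enforceHighstate(venvPepper, target)
            }
        }
    }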
diff --git a/cloud-update.groovy b/cloud-update.groovy
index a8b2c71..8c8b8b6 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -10,9 +10,12 @@
* PER_NODE Target nodes will be managed one by one (bool)
* ROLLBACK_BY_REDEPLOY Omit taking live snapshots. Rollback is planned to be done by redeployment (bool)
* STOP_SERVICES Stop API services before update (bool)
+ * TARGET_KERNEL_UPDATES Comma separated list of nodes to update kernel if newer version is available (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
+ * TARGET_REBOOT Comma separated list of nodes to reboot after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
+ * TARGET_HIGHSTATE Comma separated list of nodes to run Salt Highstate on after update or physical machine rollback (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
* TARGET_UPDATES Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- * TARGET_ROLLBACKS Comma separated list of nodes to update (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
- * TARGET_MERGES Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
+ * TARGET_ROLLBACKS Comma separated list of nodes to rollback (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
+ * TARGET_SNAPSHOT_MERGES Comma separated list of nodes to merge live snapshot for (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
* CTL_TARGET Salt targeted CTL nodes (ex. ctl*)
* PRX_TARGET Salt targeted PRX nodes (ex. prx*)
* MSG_TARGET Salt targeted MSG nodes (ex. msg*)
@@ -28,11 +31,10 @@
* CMP_TARGET Salt targeted physical compute nodes (ex. cmp001*)
* KVM_TARGET Salt targeted physical KVM nodes (ex. kvm01*)
* CEPH_OSD_TARGET Salt targeted physical Ceph OSD nodes (ex. osd001*)
- * GTW_TARGET Salt targeted physical GTW nodes (ex. gtw01*)
- * REBOOT Reboot nodes after update (bool)
- * ROLLBACK_PKG_VERSIONS Space separated list of pkgs=versions to rollback to (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
- * PURGE_PKGS Space separated list of pkgs=versions to be purged (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
- * REMOVE_PKGS Space separated list of pkgs=versions to be removed (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ * GTW_TARGET Salt targeted physical or virtual GTW nodes (ex. gtw01*)
+ * ROLLBACK_PKG_VERSIONS Space separated list of pkgs=versions to rollback to on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ * PURGE_PKGS Space separated list of pkgs=versions to be purged on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ * REMOVE_PKGS Space separated list of pkgs=versions to be removed on physical targeted machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
* RESTORE_GALERA Restore Galera DB (bool)
* RESTORE_CONTRAIL_DB Restore Cassandra and Zookeeper DBs for OpenContrail (bool)
*
@@ -44,7 +46,8 @@
def updates = TARGET_UPDATES.tokenize(",").collect{it -> it.trim()}
def rollbacks = TARGET_ROLLBACKS.tokenize(",").collect{it -> it.trim()}
-def merges = TARGET_MERGES.tokenize(",").collect{it -> it.trim()}
+def merges = TARGET_SNAPSHOT_MERGES.tokenize(",").collect{it -> it.trim()}
+def reboots = TARGET_REBOOT.tokenize(",").collect{it -> it.trim()}
def pepperEnv = "pepperEnv"
def minions
@@ -56,8 +59,9 @@
def updatePkgs(pepperEnv, target, targetType="", targetPackages="") {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
+ def kernelUpdates = TARGET_KERNEL_UPDATES.tokenize(",").collect{it -> it.trim()}
+ def distUpgrade = false
def commandKwargs
- def distUpgrade
def pkgs
def out
@@ -65,7 +69,11 @@
stage("List package upgrades") {
common.infoMsg("Listing all the packages that have a new update available on ${target}")
- pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', [], null, true))
+ if (kernelUpdates.contains(targetType)) {
+ pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', [], null, true))
+ } else {
+ pkgs = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.list_upgrades', ['dist_upgrade=False'], null, true))
+ }
if(targetPackages != "" && targetPackages != "*"){
common.infoMsg("Note that only the ${targetPackages} would be installed from the above list of available updates on the ${target}")
}
@@ -91,27 +99,29 @@
if (targetPackages != "") {
// list installed versions of pkgs that will be upgraded
- def installedPkgs = []
- def newPkgs = []
- def targetPkgList = targetPackages.tokenize(',')
- for (pkg in targetPkgList) {
- def version
- try {
- def pkgsDetails = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.info_installed', [pkg], null, true))
- version = pkgsDetails.get(pkg).get('version')
- } catch (Exception er) {
- common.infoMsg("${pkg} not installed yet")
+ if (targetType == 'kvm' || targetType == 'cmp' || targetType == 'osd' || targetType == 'gtw-physical') {
+ def installedPkgs = []
+ def newPkgs = []
+ def targetPkgList = targetPackages.tokenize(',')
+ for (pkg in targetPkgList) {
+ def version
+ try {
+ def pkgsDetails = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, target, 'pkg.info_installed', [pkg], null, true))
+ version = pkgsDetails.get(pkg).get('version')
+ } catch (Exception er) {
+ common.infoMsg("${pkg} not installed yet")
+ }
+ if (version?.trim()) {
+ installedPkgs.add(pkg + '=' + version)
+ } else {
+ newPkgs.add(pkg)
+ }
}
- if (version?.trim()) {
- installedPkgs.add(pkg + '=' + version)
- } else {
- newPkgs.add(pkg)
- }
+ common.warningMsg("the following list of pkgs will be upgraded")
+ common.warningMsg(installedPkgs.join(" "))
+ common.warningMsg("the following list of pkgs will be newly installed")
+ common.warningMsg(newPkgs.join(" "))
}
- common.warningMsg("the following list of pkgs will be upgraded")
- common.warningMsg(installedPkgs.join(" "))
- common.warningMsg("the following list of pkgs will be newly installed")
- common.warningMsg(newPkgs.join(" "))
// set variables
command = "pkg.install"
packages = targetPackages
@@ -119,26 +129,27 @@
}else {
command = "pkg.upgrade"
- commandKwargs = ['dist_upgrade': 'true']
- distUpgrade = true
+ if (kernelUpdates.contains(targetType)) {
+ commandKwargs = ['dist_upgrade': 'true']
+ distUpgrade = true
+ }
packages = null
}
- // todo exception to cfg or cicd
stage("stop services on ${target}") {
- if ((STOP_SERVICES.toBoolean()) && (targetType != 'cicd')) {
- if (targetType == 'contrail') {
- stopContrailServices(pepperEnv, target)
+ if ((STOP_SERVICES.toBoolean()) && (targetType != 'cid')) {
+ if (targetType == 'ntw' || targetType == 'nal') {
+ contrailServices(pepperEnv, target, 'stop')
} else {
def probe = salt.getFirstMinion(pepperEnv, "${target}")
- stopServices(pepperEnv, probe, target)
+ services(pepperEnv, probe, target, 'stop')
}
}
}
stage('Apply package upgrades') {
// salt master pkg
- if (targetType == 'I@salt:master') {
+ if (targetType == 'cfg') {
common.warningMsg('salt-master pkg upgrade, rerun the pipeline if disconnected')
salt.runSaltProcessStep(pepperEnv, target, 'pkg.install', ['salt-master'], null, true, 5)
salt.minionsReachable(pepperEnv, 'I@salt:master', '*')
@@ -258,7 +269,7 @@
return
}
if (STOP_SERVICES.toBoolean()) {
- stopServices(pepperEnv, probe, target)
+ services(pepperEnv, probe, target, 'stop')
}
}
@@ -341,138 +352,110 @@
}
}
-def getCfgNodeProvider(pepperEnv, master_name) {
+def getNodeProvider(pepperEnv, nodeName, type='') {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- if (!CFG_NODE_PROVIDER?.trim()) {
- kvms = salt.getMinions(pepperEnv, 'I@salt:control')
- for (kvm in kvms) {
- try {
- vms = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, kvm, 'virt.list_active_vms', [], null, true))
- if (vms.toString().contains(master_name)) {
+ def kvms = salt.getMinions(pepperEnv, 'I@salt:control')
+ for (kvm in kvms) {
+ try {
+ vms = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, kvm, 'virt.list_domains', [], null, true))
+ if (vms.toString().contains(nodeName)) {
+ if (type == 'master' && !CFG_NODE_PROVIDER?.trim()) {
CFG_NODE_PROVIDER = kvm
+ } else {
+ return kvm
//break
}
- } catch (Exception er) {
- common.infoMsg("${master_name} not present on ${kvm}")
+ }
+ } catch (Exception er) {
+ common.infoMsg("${nodeName} not present on ${kvm}")
+ }
+ }
+}
+
+def services(pepperEnv, probe, target, action='stop') {
+ def services = ["keepalived","haproxy","nginx","nova-api","cinder","glance","heat","neutron","apache2","rabbitmq-server"]
+ if (action == 'stop') {
+ def openstack = new com.mirantis.mk.Openstack()
+ openstack.stopServices(pepperEnv, probe, target, services, INTERACTIVE.toBoolean())
+ } else {
+ def salt = new com.mirantis.mk.Salt()
+ for (s in services) {
+ def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, "${probe}*", "service --status-all | grep ${s} | awk \'{print \$4}\'"))
+ def servicesList = outputServicesStr.tokenize("\n").init() //init() returns the items from the Iterable excluding the last item
+ if (servicesList) {
+ for (name in servicesList) {
+ if (!name.contains('Salt command')) {
+ salt.runSaltProcessStep(pepperEnv, "${target}*", 'service.start', ["${name}"])
+ }
+ }
}
}
}
}
-/*
-def rollbackSaltMaster(pepperEnv, target, path='/var/lib/libvirt/images') {
+// must be treated separately due to OpenContrail on Trusty
+def contrailServices(pepperEnv, target, action='stop') {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- try {
- input message: "PART1 - Are you sure to rollback ${target}? To rollback click on PROCEED. To skip rollback PART1 click on ABORT."
- } catch (Exception er) {
- common.infoMsg("skipping rollback of ${target}")
- return
- }
- def domain = salt.getDomainName(pepperEnv)
- def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
- getCfgNodeProvider(pepperEnv, master)
- try {
- try {
- salt.getReturnValues(salt.cmdRun(pepperEnv, CFG_NODE_PROVIDER, "ls -la ${path}/${master}.${domain}.xml"))
- common.errorMsg('Pipeline is about to disconnect from salt-api. You will have to rerun the pipeline with ROLLBACK_CFG checked and skip PART1 to finish rollback.')
- salt.cmdRun(pepperEnv, CFG_NODE_PROVIDER, "virsh destroy ${master}.${domain}; virsh define ${path}/${master}.${domain}.xml; virsh start ${master}.${domain} ")
- } catch (Exception er) {
- common.errorMsg(er)
- input message: "Rollback for ${target} failed. Rollback manually."
- }
- } catch (Exception er) {
- common.errorMsg(er)
- input message: "Rollback for ${target} failed. Rollback manually."
- }
-}
-
-def finishSaltMasterRollback(pepperEnv, target, path='/var/lib/libvirt/images') {
- def salt = new com.mirantis.mk.Salt()
- def common = new com.mirantis.mk.Common()
- def virsh = new com.mirantis.mk.Virsh()
- try {
- input message: "PART2 - Are you sure to finalize ${target} rollback? Click on PROCEED. To skip rollback click on ABORT."
- } catch (Exception er) {
- common.infoMsg("skipping finalize rollback of ${target}")
- return
- }
- salt.minionsReachable(pepperEnv, 'I@salt:master', '*')
- def domain = salt.getDomainName(pepperEnv)
- def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
- getCfgNodeProvider(pepperEnv, master)
- try {
- virsh.liveSnapshotAbsent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME, path)
- // purge and setup previous repos
- salt.enforceState(pepperEnv, target, 'linux.system.repo')
- } catch (Exception e) {
- common.errorMsg(e)
- input message: "Check what failed after ${target} rollback. Do you want to PROCEED?"
- }
-}*/
-
-def stopServices(pepperEnv, probe, target) {
- def openstack = new com.mirantis.mk.Openstack()
def services = []
- services.add('keepalived')
- services.add('nginx')
- services.add('haproxy')
- services.add('nova-api')
- services.add('cinder')
- services.add('glance')
- services.add('heat')
- services.add('neutron')
- services.add('apache2')
- services.add('rabbitmq-server')
- if (INTERACTIVE.toBoolean()) {
- openstack.stopServices(pepperEnv, probe, target, services, true)
+ if (action == 'stop') {
+ services.add('supervisor-control')
+ services.add('supervisor-config')
+ services.add('supervisor-database')
+ services.add('zookeeper')
+ services.add('ifmap-server')
+ services.add('haproxy')
+ services.add('keepalived')
} else {
- openstack.stopServices(pepperEnv, probe, target, services)
+ services.add('keepalived')
+ services.add('haproxy')
+ services.add('ifmap-server')
+ services.add('zookeeper')
+ services.add('supervisor-database')
+ services.add('supervisor-config')
+ services.add('supervisor-control')
}
-}
-
-// must be stopped separately due to OC on Trusty
-def stopContrailServices(pepperEnv, target) {
- def salt = new com.mirantis.mk.Salt()
- def common = new com.mirantis.mk.Common()
- def services = []
- services.add('keepalived')
- services.add('haproxy')
- services.add('supervisor-control')
- services.add('supervisor-config')
- services.add('supervisor-database')
- services.add('zookeeper')
- services.add('ifmap-server')
for (s in services) {
try {
- salt.runSaltProcessStep(pepperEnv, target, 'service.stop', [s], null, true)
+ salt.runSaltProcessStep(pepperEnv, target, "service.${action}", [s], null, true)
} catch (Exception er) {
- common.infoMsg(er)
+ common.warningMsg(er)
}
}
}
-def highstate(pepperEnv, target) {
+def highstate(pepperEnv, target, type) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
-
- stage("Apply highstate on ${target} nodes") {
- try {
- common.retry(3){
- //salt.enforceHighstate(pepperEnv, target)
+ def highstates = TARGET_HIGHSTATE.tokenize(",").collect{it -> it.trim()}
+ def reboots = TARGET_REBOOT.tokenize(",").collect{it -> it.trim()}
+ // optionally run highstate
+ if (highstates.contains(type)) {
+ stage("Apply highstate on ${target} nodes") {
+ try {
+ common.retry(3){
+ salt.enforceHighstate(pepperEnv, target)
+ }
+ } catch (Exception e) {
+ common.errorMsg(e)
+ if (INTERACTIVE.toBoolean()) {
+ input message: "Highstate failed on ${target}. Fix it manually or run rollback on ${target}."
+ } else {
+ throw new Exception("highstate failed")
+ }
}
- } catch (Exception e) {
- common.errorMsg(e)
- if (INTERACTIVE.toBoolean()) {
- input message: "Highstate failed on ${target}. Fix it manually or run rollback on ${target}."
- } else {
- throw new Exception("highstate failed")
- }
+ }
+ } else if (!reboots.contains(type) && STOP_SERVICES.toBoolean() && type != 'cid') {
+ if (type == 'ntw' || type == 'nal') {
+ contrailServices(pepperEnv, target, 'start')
+ } else {
+ def probe = salt.getFirstMinion(pepperEnv, "${target}")
+ services(pepperEnv, probe, target, 'start')
}
}
// optionally reboot
- if (REBOOT.toBoolean()) {
+ if (reboots.contains(type)) {
stage("Reboot ${target} nodes") {
salt.runSaltProcessStep(pepperEnv, target, 'system.reboot', null, null, true, 5)
sleep(10)
@@ -510,14 +493,10 @@
def domain = salt.getDomainName(pepperEnv)
def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
common.warningMsg(target_hosts)
- //def nodeCount = 1
for (t in target_hosts) {
def target = salt.stripDomainName(t)
- def nodeCount = target[4]
- common.warningMsg(nodeCount)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+ def nodeProvider = getNodeProvider(pepperEnv, t)
virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
- //nodeCount++
}
}
@@ -526,19 +505,18 @@
def virsh = new com.mirantis.mk.Virsh()
def domain = salt.getDomainName(pepperEnv)
def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def nodeCount = 1
for (t in target_hosts) {
if (tgt == 'I@salt:master') {
- def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
- getCfgNodeProvider(pepperEnv, master)
+ def master = salt.getReturnValues(salt.getPillar(pepperEnv, t, 'linux:network:hostname'))
+ getNodeProvider(pepperEnv, master, 'master')
virsh.liveSnapshotMerge(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
} else {
def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+ def nodeProvider = getNodeProvider(pepperEnv, t)
virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
}
- nodeCount++
}
+ salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
}
@@ -550,20 +528,16 @@
def domain = salt.getDomainName(pepperEnv)
def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
// first destroy all vms
- def nodeCount = 1
for (t in target_hosts) {
def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+ def nodeProvider = getNodeProvider(pepperEnv, t)
salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
- nodeCount++
}
- nodeCount = 1
// rollback vms
for (t in target_hosts) {
def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+ def nodeProvider = getNodeProvider(pepperEnv, t)
virsh.liveSnapshotRollback(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
- nodeCount++
}
try {
salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
@@ -586,21 +560,25 @@
def domain = salt.getDomainName(pepperEnv)
def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
// first destroy all vms
- def nodeCount = 1
for (t in target_hosts) {
def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+ def nodeProvider = getNodeProvider(pepperEnv, t)
salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.undefine', ["${target}.${domain}"], null, true)
+ //salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.undefine', ["${target}.${domain}"], null, true)
try {
salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
} catch (Exception e) {
common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
}
- nodeCount++
}
}
+def saltMasterBackup(pepperEnv) {
+ def salt = new com.mirantis.mk.Salt()
+ salt.enforceState(pepperEnv, 'I@salt:master', 'backupninja')
+ salt.cmdRun(pepperEnv, 'I@salt:master', "su root -c 'backupninja -n --run /etc/backup.d/200.backup.rsync'")
+}
+
def backupCeph(pepperEnv, tgt) {
def salt = new com.mirantis.mk.Salt()
salt.enforceState(pepperEnv, 'I@ceph:backup:server', 'ceph.backup')
@@ -702,24 +680,43 @@
]
}
-def verifyAPIs(pepperEnv) {
+def verifyAPIs(pepperEnv, target) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
+ def out = salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+ if (out.toString().toLowerCase().contains('error')) {
+ common.errorMsg(out)
+ if (INTERACTIVE.toBoolean()) {
+ input message: "APIs are not working as expected. Please fix it manually."
+ } else {
+ throw new Exception("APIs are not working as expected")
+ }
+ }
}
-def verifyGalera(pepperEnv) {
+def verifyGalera(pepperEnv, target, count=0, maxRetries=200) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def out = salt.getReturnValues(salt.cmdRun(pepperEnv, 'I@galera:master', 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
-
- if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
- if (INTERACTIVE.toBoolean()) {
- input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+ def out
+ while(count < maxRetries) {
+ try {
+ out = salt.getReturnValues(salt.cmdRun(pepperEnv, target, 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
+ } catch (Exception er) {
+ common.infoMsg(er)
+ }
+ if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
+ count++
+ if (count == maxRetries) {
+ if (INTERACTIVE.toBoolean()) {
+ input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+ } else {
+ common.errorMsg(out)
+ throw new Exception("Galera is not working as expected")
+ }
+ }
+ sleep(time: 500, unit: 'MILLISECONDS')
} else {
- common.errorMsg(out)
- throw new Exception("Galera is not working as expected")
+ break
}
}
}
@@ -818,41 +815,44 @@
*/
if (updates.contains("cfg")) {
def target = 'I@salt:master'
+ def type = 'cfg'
if (salt.testTarget(pepperEnv, target)) {
def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
- getCfgNodeProvider(pepperEnv, master)
+ getNodeProvider(pepperEnv, master, 'master')
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
virsh.liveSnapshotPresent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
+ } else {
+ saltMasterBackup(pepperEnv)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t, target)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("ctl")) {
def target = CTL_TARGET
+ def type = 'ctl'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'ctl'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyAPIs(pepperEnv, target)
}
@@ -860,20 +860,20 @@
if (updates.contains("prx")) {
def target = PRX_TARGET
+ def type = 'prx'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'prx'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'nginx')
}
@@ -881,20 +881,20 @@
if (updates.contains("msg")) {
def target = MSG_TARGET
+ def type = 'msg'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'msg'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'rabbitmq-server')
}
@@ -902,47 +902,45 @@
if (updates.contains("dbs")) {
def target = DBS_TARGET
+ def type = 'dbs'
if (salt.testTarget(pepperEnv, target)) {
backupGalera(pepperEnv)
- salt.runSaltProcessStep(pepperEnv, target, 'service.stop', ['keepalived'], null, true)
- salt.runSaltProcessStep(pepperEnv, target, 'service.stop', ['haproxy'], null, true)
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'dbs'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
- if (REBOOT.toBoolean()) {
+ if (reboots.contains(type) || PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
- // one by one update
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
+ verifyGalera(pepperEnv, t)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
+ verifyGalera(pepperEnv, target)
}
- verifyGalera(pepperEnv)
}
}
if (updates.contains("ntw")) {
def target = NTW_TARGET
+ def type = 'ntw'
if (salt.testTarget(pepperEnv, target)) {
backupContrail(pepperEnv)
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'ntw'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t, 'contrail')
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
verifyContrail(pepperEnv, t)
}
} else {
- updatePkgs(pepperEnv, target, 'contrail')
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
verifyContrail(pepperEnv, target)
}
}
@@ -950,20 +948,20 @@
if (updates.contains("nal")) {
def target = NAL_TARGET
+ def type = 'nal'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'nal'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t, 'contrail')
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target, 'contrail')
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyContrail(pepperEnv, target)
}
@@ -971,20 +969,20 @@
if (updates.contains("gtw-virtual")) {
def target = GTW_TARGET
+ def type = 'gtw-virtual'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'gtw'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'neutron-dhcp-agent')
}
@@ -992,22 +990,22 @@
if (updates.contains("cmn")) {
def target = CMN_TARGET
+ def type = 'cmn'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'cmn'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
} else {
- backupCeph(pepperEnv)
+ backupCeph(pepperEnv, target)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyCeph(pepperEnv, target, 'mon@')
}
@@ -1015,20 +1013,20 @@
if (updates.contains("rgw")) {
def target = RGW_TARGET
+ def type = 'rgw'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'rgw'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyCeph(pepperEnv, target, 'radosgw@rgw.')
}
@@ -1036,73 +1034,73 @@
if (updates.contains("log")) {
def target = LOG_TARGET
+ def type = 'log'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'log'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("mon")) {
def target = MON_TARGET
+ def type = 'mon'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'mon'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("mtr")) {
def target = MTR_TARGET
+ def type = 'mtr'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'mtr'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
}
}
if (updates.contains("cid")) {
def target = CID_TARGET
+ def type = 'cid'
if (salt.testTarget(pepperEnv, target)) {
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
- def generalTarget = 'cid'
- liveSnapshot(pepperEnv, target, generalTarget)
+ liveSnapshot(pepperEnv, target, type)
}
- updatePkgs(pepperEnv, target, 'cicd')
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
verifyService(pepperEnv, target, 'docker')
}
}
@@ -1112,16 +1110,17 @@
//
if (updates.contains("cmp")) {
def target = CMP_TARGET
+ def type = 'cmp'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'nova-compute')
}
@@ -1129,16 +1128,17 @@
if (updates.contains("kvm")) {
def target = KVM_TARGET
+ def type = 'kvm'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'libvirt-bin')
}
@@ -1146,16 +1146,17 @@
if (updates.contains("osd")) {
def target = CEPH_OSD_TARGET
+ def type = 'osd'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyCephOsds(pepperEnv, target)
}
@@ -1163,16 +1164,17 @@
if (updates.contains("gtw-physical")) {
def target = GTW_TARGET
+ def type = 'gtw-physical'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- updatePkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ updatePkgs(pepperEnv, t, type)
+ highstate(pepperEnv, t, type)
}
} else {
- updatePkgs(pepperEnv, target)
- highstate(pepperEnv, target)
+ updatePkgs(pepperEnv, target, type)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'neutron-dhcp-agent')
}
@@ -1181,7 +1183,7 @@
/*
* Rollback section
*/
- /* if (rollbacks.contains("ctl")) {
+ /* if (rollbacks.contains("cfg")) {
if (salt.testTarget(pepperEnv, 'I@salt:master')) {
stage('ROLLBACK_CFG') {
input message: "To rollback CFG nodes run the following commands on kvm nodes hosting the CFG nodes: virsh destroy cfg0X.domain; virsh define /var/lib/libvirt/images/cfg0X.domain.xml; virsh start cfg0X.domain; virsh snapshot-delete cfg0X.domain --metadata ${SNAPSHOT_NAME}; rm /var/lib/libvirt/images/cfg0X.domain.${SNAPSHOT_NAME}.qcow2; rm /var/lib/libvirt/images/cfg0X.domain.xml; At the end restart 'docker' service on all cicd nodes and run 'linux.system.repo' Salt states on cicd nodes. After running the previous commands current pipeline job will be killed."
@@ -1234,7 +1236,7 @@
if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
rollback(pepperEnv, target, 'dbs')
clusterGalera(pepperEnv)
- verifyGalera(pepperEnv)
+ verifyGalera(pepperEnv, target)
} else {
removeNode(pepperEnv, target, 'dbs')
}
@@ -1348,16 +1350,17 @@
//
if (rollbacks.contains("cmp")) {
def target = CMP_TARGET
+ def type = 'cmp'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'nova-compute')
}
@@ -1365,16 +1368,17 @@
if (rollbacks.contains("kvm")) {
def target = KVM_TARGET
+ def type = 'kvm'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'libvirt-bin')
}
@@ -1382,16 +1386,17 @@
if (rollbacks.contains("osd")) {
def target = CEPH_OSD_TARGET
+ def type = 'osd'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyCephOsds(pepperEnv, target)
}
@@ -1399,16 +1404,17 @@
if (rollbacks.contains("gtw-physical")) {
def target = GTW_TARGET
+ def type = 'gtw-physical'
if (salt.testTarget(pepperEnv, target)) {
if (PER_NODE.toBoolean()) {
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
rollbackPkgs(pepperEnv, t)
- highstate(pepperEnv, t)
+ highstate(pepperEnv, t, type)
}
} else {
rollbackPkgs(pepperEnv, target, target)
- highstate(pepperEnv, target)
+ highstate(pepperEnv, target, type)
}
verifyService(pepperEnv, target, 'neutron-dhcp-agent')
}
@@ -1426,54 +1432,66 @@
if (merges.contains("ctl")) {
if (salt.testTarget(pepperEnv, CTL_TARGET)) {
mergeSnapshot(pepperEnv, CTL_TARGET, 'ctl')
+ verifyService(pepperEnv, CTL_TARGET, 'nova-api')
}
}
if (merges.contains("prx")) {
if (salt.testTarget(pepperEnv, PRX_TARGET)) {
mergeSnapshot(pepperEnv, PRX_TARGET, 'prx')
+ verifyService(pepperEnv, PRX_TARGET, 'nginx')
}
}
if (merges.contains("msg")) {
if (salt.testTarget(pepperEnv, MSG_TARGET)) {
mergeSnapshot(pepperEnv, MSG_TARGET, 'msg')
+ verifyService(pepperEnv, MSG_TARGET, 'rabbitmq-server')
}
}
if (merges.contains("dbs")) {
if (salt.testTarget(pepperEnv, DBS_TARGET)) {
mergeSnapshot(pepperEnv, DBS_TARGET, 'dbs')
+ verifyGalera(pepperEnv, DBS_TARGET)
+ backupGalera(pepperEnv)
}
}
if (merges.contains("ntw")) {
if (salt.testTarget(pepperEnv, NTW_TARGET)) {
mergeSnapshot(pepperEnv, NTW_TARGET, 'ntw')
+ verifyContrail(pepperEnv, NTW_TARGET)
+ backupContrail(pepperEnv)
}
}
if (merges.contains("nal")) {
if (salt.testTarget(pepperEnv, NAL_TARGET)) {
mergeSnapshot(pepperEnv, NAL_TARGET, 'nal')
+ verifyContrail(pepperEnv, NAL_TARGET)
}
}
if (merges.contains("gtw-virtual")) {
if (salt.testTarget(pepperEnv, GTW_TARGET)) {
mergeSnapshot(pepperEnv, GTW_TARGET, 'gtw')
+ verifyService(pepperEnv, GTW_TARGET, 'neutron-dhcp-agent')
}
}
if (merges.contains("cmn")) {
if (salt.testTarget(pepperEnv, CMN_TARGET)) {
mergeSnapshot(pepperEnv, CMN_TARGET, 'cmn')
+ verifyCeph(pepperEnv, CMN_TARGET, 'mon@')
+ backupCeph(pepperEnv, CMN_TARGET)
}
}
if (merges.contains("rgw")) {
if (salt.testTarget(pepperEnv, RGW_TARGET)) {
mergeSnapshot(pepperEnv, RGW_TARGET, 'rgw')
+ verifyCeph(pepperEnv, RGW_TARGET, 'radosgw@rgw.')
}
}
@@ -1498,15 +1516,18 @@
if (merges.contains("cid")) {
if (salt.testTarget(pepperEnv, CID_TARGET)) {
mergeSnapshot(pepperEnv, CID_TARGET, 'cid')
+ verifyService(pepperEnv, CID_TARGET, 'docker')
}
}
if (RESTORE_GALERA.toBoolean()) {
restoreGalera(pepperEnv)
+ verifyGalera(pepperEnv, DBS_TARGET)
}
if (RESTORE_CONTRAIL_DB.toBoolean()) {
restoreContrailDb(pepperEnv)
+ // verification is already present in restore pipelines
}
} catch (Throwable e) {
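Note: every TARGET_* knob above is a plain comma-separated list that is tokenized once and then consulted per node type, so opting a type into kernel updates, reboots or highstates is simple list membership. A minimal sketch with illustrative values:

    // Illustrative only: how the comma-separated TARGET_* parameters are consumed.
    def reboots = 'cmp,kvm'.tokenize(',').collect { it -> it.trim() }   // e.g. TARGET_REBOOT
    def type = 'cmp'
    assert reboots.contains(type)   // the check highstate() uses to decide on a reboot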
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 4952502..4bb5018 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -29,6 +29,7 @@
def artifacts_dir = 'validation_artifacts/'
def remote_artifacts_dir = '/root/qa_results/'
def current_target_node = ''
+def first_node = ''
def tempest_result = ''
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -61,6 +62,7 @@
current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
common.warningMsg("Shutdown current vip node ${current_target_node}")
validate.shutdown_vm_node(saltMaster, current_target_node, 'soft_shutdown')
+ sleep 15
}
stage('Check during shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_shutdown")
@@ -79,6 +81,9 @@
if (status == null) {
throw new Exception("Node ${current_target_node} cannot start")
}
+ first_node = current_target_node
+ current_target_node = ''
+ sleep 30
}
stage('Check after shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_shutdown")
@@ -96,10 +101,12 @@
input message: "Are you sure you want to hard shutdown current vip node?"
}
}
- salt.cmdRun(saltMaster, current_target_node, "service keepalived stop")
+ salt.cmdRun(saltMaster, first_node, "service keepalived stop")
current_target_node = validate.get_vip_node(saltMaster, TARGET_NODES)
common.warningMsg("Shutdown current vip node ${current_target_node}")
validate.shutdown_vm_node(saltMaster, current_target_node, 'hard_shutdown')
+ sleep 10
+ salt.cmdRun(saltMaster, first_node, "service keepalived start")
}
stage('Check during hard shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_during_hard_shutdown")
@@ -116,9 +123,10 @@
common.infoMsg("Checking that node is UP")
status = salt.minionsReachable(saltMaster, 'I@salt:master', current_target_node, null, 10, num_retries)
if (status == null) {
- throw new Exception("Command execution failed")
+ throw new Exception("Node ${current_target_node} cannot start")
}
- salt.cmdRun(saltMaster, TARGET_NODES, "service keepalived start")
+ current_target_node = ''
+ sleep 30
}
stage('Check after hard shutdown') {
tempest_result = validate.runCVPtempest(saltMaster, TEMPEST_TARGET_NODE, TEMPEST_TEST_PATTERN, SKIP_LIST_PATH, remote_artifacts_dir, "docker_tempest_after_hard_shutdown")
@@ -127,7 +135,7 @@
currentBuild.result = "FAILURE"
throw new Exception("Tempest tests failed")
}
- sleep 15
+ sleep 5
}
stage('Reboot') {
@@ -148,6 +156,7 @@
currentBuild.result = "FAILURE"
throw new Exception("Tempest tests failed")
}
+ sleep 30
}
stage('Check after reboot') {
common.warningMsg("Checking that node is UP")
@@ -175,6 +184,11 @@
if (DEBUG_MODE == 'false') {
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
+ if (current_target_node != '') {
+ common.warningMsg("Powering on node ${current_target_node}")
+ kvm = validate.locate_node_on_kvm(saltMaster, current_target_node)
+ salt.cmdRun(saltMaster, kvm, "virsh start ${current_target_node}")
+ }
}
}
}
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
new file mode 100644
index 0000000..dd58da5
--- /dev/null
+++ b/cvp-runner.groovy
@@ -0,0 +1,39 @@
+/**
+ *
+ * Launch pytest frameworks in Jenkins
+ *
+ * Expected parameters:
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ *
+ * TESTS_SET Path to a test file to run; leave empty for a full run
+ * TESTS_REPO Repo to clone
+ * TESTS_SETTINGS Additional environment variables to apply
+ * PROXY Proxy to use for cloning repo or for pip
+ *
+ */
+
+validate = new com.mirantis.mcp.Validate()
+
+def artifacts_dir = 'validation_artifacts/'
+
+node() {
+ try{
+ stage('Initialization') {
+ validate.prepareVenv(TESTS_REPO, PROXY)
+ }
+
+ stage('Run Tests') {
+ sh "mkdir -p ${artifacts_dir}"
+ validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
+ }
+ stage ('Publish results') {
+ archiveArtifacts artifacts: "${artifacts_dir}/*"
+ junit "${artifacts_dir}/*.xml"
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+}
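Note: a minimal sketch of triggering the new runner from another pipeline; the job name and all values are illustrative assumptions (per the header, an empty TESTS_SET means a full run):

    // Hypothetical trigger; values are placeholders, not real endpoints.
    build job: "cvp-runner", parameters: [
        [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: 'http://192.168.10.2:6969'],
        [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: 'salt'],
        [$class: 'StringParameterValue', name: 'TESTS_REPO', value: 'https://example.org/cvp-sanity-checks'],
        [$class: 'StringParameterValue', name: 'TESTS_SET', value: ''],
        [$class: 'StringParameterValue', name: 'TESTS_SETTINGS', value: ''],
        [$class: 'StringParameterValue', name: 'PROXY', value: '']
    ]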
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index c2d4943..a2a4907 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -7,33 +7,6 @@
*
**/
-// Deprecation to avoid unexpected behaviour because it should be passed via initial context.
-// Need to delete this "if" statement at 1 April 2018.
-if(env.COOKIECUTTER_TEMPLATE_CREDENTIALS ||
- env.COOKIECUTTER_TEMPLATE_URL ||
- env.COOKIECUTTER_TEMPLATE_BRANCH ||
- env.COOKIECUTTER_TEMPLATE_PATH ||
- env.SHARED_RECLASS_URL){
- println '''
- DEPRECATION: Please note that the following variables are deprocated:
- - COOKIECUTTER_TEMPLATE_CREDENTIALS
- - COOKIECUTTER_TEMPLATE_URL
- - COOKIECUTTER_TEMPLATE_BRANCH
- - COOKIECUTTER_TEMPLATE_PATH
- - SHARED_RECLASS_URL
- You need to pass the values using the following variables from initial cookiecutter context:
- - cookiecutter_template_url
- - cookiecutter_template_branch
- - shared_reclass_url
- The following variables are not needed anymore:
- - COOKIECUTTER_TEMPLATE_CREDENTIALS - cookiecutter-templates repos are accessible for anounimous
- (https://gerrit.mcp.mirantis.net)
- - COOKIECUTTER_TEMPLATE_PATH - hardcoded to "${env.WORKSPACE}/template"
- '''
- currentBuild.result = "FAILURE"
- return
-}
-
common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
@@ -205,8 +178,16 @@
// download create-config-drive
// FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
- def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/create_config_drive.sh"
- def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/master_config.sh"
+ def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
+ if (mcpCommonScriptsBranch == '') {
+ mcpCommonScriptsBranch = mcpVersion
+ // There is no nightly branch in the mcp-common-scripts repo, therefore use master
+ if(mcpVersion == "nightly"){
+ mcpCommonScriptsBranch = 'master'
+ }
+ }
+ def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
+ def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
sh "wget -O user_data.sh ${user_data_script_url}"
@@ -222,9 +203,14 @@
smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+ smc['MCP_VERSION'] = "${mcpVersion}"
if (templateContext['default_context']['local_repositories'] == 'True'){
+ def localRepoIP = templateContext['default_context']['local_repo_url']
+ smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+ smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
smc['PIPELINES_FROM_ISO'] = 'false'
- smc['PIPELINE_REPO_URL'] = 'http://' + templateContext['default_context']['aptly_server_deploy_address'] + ':8088'
+ smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+ smc['LOCAL_REPOS'] = 'true'
}
if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
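Note: the branch used for mcp-common-scripts falls back to mcpVersion when the context does not set it, with nightly mapped to master. A minimal sketch of that rule as a standalone function (hypothetical name):

    // Hypothetical extraction of the branch-fallback rule above.
    def resolveScriptsBranch(String contextBranch, String mcpVersion) {
        if (contextBranch) {
            return contextBranch                  // explicit value from the context wins
        }
        // mcp-common-scripts has no nightly branch, so nightly maps to master
        return mcpVersion == 'nightly' ? 'master' : mcpVersion
    }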
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index 7b9054a..7602dcf 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -98,9 +98,15 @@
}
}
- stage("Install monitoring") {
+ stage("Update/Install monitoring") {
+ //Collect Grains
+ salt.enforceState(pepperEnv, targetLiveAll, 'salt.minion.grains')
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_modules')
+ salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'mine.update')
+ sleep(5)
+
salt.enforceState(pepperEnv, targetLiveAll, 'prometheus')
- salt.enforceState(pepperEnv, 'I@prometheus', 'prometheus')
+ salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
}
} catch (Throwable e) {
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 21134a2..df9bfd1 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -16,6 +16,12 @@
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+def getNodeProvider(pepperEnv, name) {
+ def salt = new com.mirantis.mk.Salt()
+ def kvm = salt.getKvmMinionId(pepperEnv)
+ return salt.getReturnValues(salt.getPillar(pepperEnv, "${kvm}", "salt:control:cluster:internal:node:${name}:provider"))
+}
+
def stopServices(pepperEnv, probe, target, type) {
def openstack = new com.mirantis.mk.Openstack()
def services = []
@@ -86,7 +92,7 @@
if (SKIP_VM_RELAUNCH.toBoolean() == false) {
- def upgNodeProvider = salt.getNodeProvider(pepperEnv, test_upgrade_node)
+ def upgNodeProvider = getNodeProvider(pepperEnv, test_upgrade_node)
salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["${test_upgrade_node}.${domain}"])
salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["${test_upgrade_node}.${domain}"])
@@ -107,10 +113,17 @@
salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.sync_all', [])
}
+ stateRun(pepperEnv, "${test_upgrade_node}*", ['linux.network.proxy'])
+ try {
+ salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion.base"], null, true, 60)
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+
stateRun(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh'])
try {
- salt.runSaltProcessStep(master, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
+ salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
} catch (Exception e) {
common.warningMsg(e)
}
@@ -223,10 +236,9 @@
stopServices(pepperEnv, node, tgt, general_target)
}
- def node_count = 1
for (t in target_hosts) {
def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
+ def nodeProvider = salt.getNodeProvider(pepperEnv, t)
if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
sleep(2)
@@ -244,7 +256,6 @@
} else if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, snapshotName)
}
- node_count++
}
}
@@ -263,6 +274,13 @@
// salt '*' saltutil.sync_all
salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.sync_all', [])
+ stateRun(pepperEnv, upgrade_general_target, ['linux.network.proxy'])
+ try {
+ salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion.base"], null, true, 60)
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+
if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
try {
@@ -302,7 +320,7 @@
common.warningMsg(e)
}
try {
- salt.runSaltProcessStep(master, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
+ salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
} catch (Exception e) {
common.warningMsg(e)
}
@@ -468,10 +486,9 @@
general_target = 'ctl'
}
- def node_count = 1
for (t in target_hosts) {
def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
+ def nodeProvider = salt.getNodeProvider(pepperEnv, t)
salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
sleep(2)
if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
@@ -487,7 +504,6 @@
salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
virsh.liveSnapshotAbsent(pepperEnv, nodeProvider, target, snapshotName)
}
- node_count++
}
}
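Note: the new local getNodeProvider resolves a control-plane VM's hosting KVM node from pillar instead of deriving it from a node counter. An illustrative sketch of the pillar path it assumes and a usage call (node and domain names are placeholders):

    // Assumed pillar layout consumed by getNodeProvider (illustrative):
    //   salt:control:cluster:internal:node:ctl01:provider: kvm01.example.local
    def provider = getNodeProvider(pepperEnv, 'ctl01')   // -> 'kvm01.example.local'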
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 95e8bac..8af3fbe 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -16,6 +16,10 @@
* DOCKER_IMAGES
* GIT_CREDENTIALS
* GIT_REPO_LIST
+ * EMAIL_NOTIFY
+ * NOTIFY_RECIPIENTS
+ * NOTIFY_TEXT
+ *
*/
common = new com.mirantis.mk.Common()
@@ -51,31 +55,14 @@
]
}
-def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
- git.checkoutGitRepository(repoName, repoURL, "master", credentials)
- dir(repoName) {
- def checkTag = sh(script: "git tag -l ${tag}", returnStdout: true)
- if(checkTag == ""){
- sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
- }else{
- def currentTagRef = sh(script: "git rev-list -n 1 ${tag}", returnStdout: true)
- if(currentTagRef.equals(ref)){
- common.infoMsg("Tag is already on the right ref")
- return
- }
- else{
- sshagent([credentials]) {
- sh "git push --delete origin ${tag}"
- }
- sh "git tag --delete ${tag}"
- sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
- }
- }
- sshagent([credentials]) {
- sh "git push origin ${tag}"
- }
- }
+def triggerGitTagJob(gitRepoList, gitCredentials, tag) {
+ build job: "tag-git-repos-stable", parameters: [
+ [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
+ [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
+ [$class: 'StringParameterValue', name: 'TAG', value: tag]
+ ]
}
+
timeout(time: 12, unit: 'HOURS') {
node() {
try {
@@ -100,18 +87,13 @@
if(RELEASE_GIT.toBoolean())
{
common.infoMsg("Promoting Git repositories")
- def repos = GIT_REPO_LIST.tokenize('\n')
- def repoUrl, repoName, repoCommit, repoArray
- for (repo in repos){
- if(repo.trim().indexOf(' ') == -1){
- throw new IllegalArgumentException("Wrong format of repository and commit input")
- }
- repoArray = repo.trim().tokenize(' ')
- repoName = repoArray[0]
- repoUrl = repoArray[1]
- repoCommit = repoArray[2]
- gitRepoAddTag(repoUrl, repoName, TARGET_REVISION, GIT_CREDENTIALS, repoCommit)
- }
+ triggerGitTagJob(GIT_REPO_LIST, GIT_CREDENTIALS, TARGET_REVISION)
+ }
+ if (EMAIL_NOTIFY.toBoolean()) {
+ emailext(to: NOTIFY_RECIPIENTS,
+ body: NOTIFY_TEXT,
+ subject: "MCP Promotion has been done")
}
}
} catch (Throwable e) {
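The promotion pipeline now only delegates tagging to the tag-git-repos-stable job (added below as tag-git-repos.groovy); the per-repository parsing moves there. GIT_REPO_LIST keeps the one-entry-per-line '<name> <url> <commit>' format. An illustrative value (repository names, URLs and SHAs here are made up):

    GIT_REPO_LIST = "reclass-system https://gerrit.example.org/salt-models/reclass-system 1a2b3c4d\n" +
                    "mk-pipelines https://gerrit.example.org/mk/mk-pipelines 5e6f7a8b"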
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 10ec378..c98ff17 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -19,12 +19,7 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- stage('Start restore') {
- // # actual upgrade
-
- stage('Ask for manual confirmation') {
- input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
- }
+ stage('Restore') {
try {
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
} catch (Exception er) {
@@ -68,10 +63,12 @@
// wait until supervisor-database service is up
salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
- sleep(5)
+ sleep(60)
+
// performs restore
- salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+ sleep(5)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
// wait until supervisor-database service is up
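Both restore pipelines now run the dedicated backup-restore state through salt.enforceState instead of shelling out with su root -c 'salt-call state.sls ...', which lets the shared library report state failures instead of relying on shell output. A minimal sketch of the shared wait-then-restore pattern, assuming the same salt lib (restoreViaState is a hypothetical helper name; restore-zookeeper.groovy below follows the same shape with 'zookeeper.backup'):

    def restoreViaState(master, target, state) {
        // block until the supervisor-managed database reports running on the target
        salt.commandStatus(master, target, 'service supervisor-database status', 'running')
        sleep(60)  // give the database time to settle before restoring
        salt.enforceState(master, target, state)  // e.g. 'cassandra.backup'
    }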
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index d459266..185f097 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -19,13 +19,7 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- stage('Start restore') {
- // # actual upgrade
-
- stage('Ask for manual confirmation') {
- input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Zookeeper?"
- }
- // Zookeeper restore section
+ stage('Restore') {
try {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
} catch (Exception er) {
@@ -69,7 +63,7 @@
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
// performs restore
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
+ salt.enforceState(pepperEnv, 'I@opencontrail:control', "zookeeper.backup")
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
diff --git a/tag-git-repos.groovy b/tag-git-repos.groovy
new file mode 100644
index 0000000..373e029
--- /dev/null
+++ b/tag-git-repos.groovy
@@ -0,0 +1,62 @@
+/**
+ *
+ * Tag Git repositories
+ *
+ * Expected parameters:
+ * GIT_REPO_LIST - Repositories to tag, one per line, in the format: <name> <url> <commit>
+ * GIT_CREDENTIALS - Credentials ID used to push the tags
+ * TAG - Name of the tag to set
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+
+def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
+ git.checkoutGitRepository(repoName, repoURL, "master", credentials)
+ dir(repoName) {
+ def checkTag = sh(script: "git tag -l ${tag}", returnStdout: true)
+ if(checkTag == ""){
+ sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+ }else{
+ def currentTagRef = sh(script: "git rev-list -n 1 ${tag}", returnStdout: true).trim() // trim the trailing newline so the ref comparison below can match
+ if(currentTagRef.equals(ref)){
+ common.infoMsg("Tag is already on the right ref")
+ return
+ }
+ else{
+ sshagent([credentials]) {
+ sh "git push --delete origin ${tag}"
+ }
+ sh "git tag --delete ${tag}"
+ sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+ }
+ }
+ sshagent([credentials]) {
+ sh "git push origin ${tag}"
+ }
+ }
+}
+
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+ try {
+ def repos = GIT_REPO_LIST.tokenize('\n')
+ def repoUrl, repoName, repoCommit, repoArray
+ for (repo in repos){
+ repoArray = repo.trim().tokenize(' ')
+ if(repoArray.size() != 3){
+ throw new IllegalArgumentException("Wrong format of repository and commit input, expected: <name> <url> <commit>")
+ }
+ repoName = repoArray[0]
+ repoUrl = repoArray[1]
+ repoCommit = repoArray[2]
+ gitRepoAddTag(repoUrl, repoName, TAG, GIT_CREDENTIALS, repoCommit)
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+ }
+}
\ No newline at end of file
diff --git a/update-mirror-image.groovy b/update-mirror-image.groovy
index 2f71d2a..96dc027 100644
--- a/update-mirror-image.groovy
+++ b/update-mirror-image.groovy
@@ -24,23 +24,12 @@
python = new com.mirantis.mk.Python()
venvPepper = "venvPepper"
-@NonCPS
-def Boolean dockerExists() {
- def engine = salt.getPillar(venvPepper, 'I@aptly:server', "aptly:server:source:engine")
- def matches = (engine =~ /:docker/)
- try{
- def test = matches[position]
- return false
- }catch(Exception ex){
- return true
- }
-}
-
timeout(time: 12, unit: 'HOURS') {
node() {
try {
python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- def dockerExists = dockerExists()
+ def engine = salt.getPillar(venvPepper, 'I@aptly:server', "aptly:server:source:engine")
+ runningOnDocker = engine.get("return")[0].containsValue("docker")
if(UPDATE_APTLY.toBoolean()){
stage('Update Aptly mirrors'){
@@ -48,7 +37,7 @@
if(RECREATE_APTLY_MIRRORS.toBoolean())
{
- if(dockerExists){
+ if(runningOnDocker){
salt.cmdRun(venvPepper, 'I@aptly:server', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
}
else{
@@ -64,7 +53,7 @@
UPDATE_APTLY_MIRRORS = UPDATE_APTLY_MIRRORS.replaceAll("\\s","")
def mirrors = UPDATE_APTLY_MIRRORS.tokenize(",")
for(mirror in mirrors){
- if(dockerExists){
+ if(runningOnDocker){
salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs} -m ${mirror}\""], null, true)
}else{
salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs} -m ${mirror}\"", 'runas=aptly'], null, true)
@@ -74,7 +63,7 @@
else{
common.infoMsg("Updating all Aptly mirrors.")
- if(dockerExists){
+ if(runningOnDocker){
salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs}\""], null, true)
}
else{
@@ -100,7 +89,7 @@
if(FORCE_OVERWRITE_APTLY_PUBLISHES.toBoolean()){
aptlyPublishArgs += "f"
}
- if(dockerExists){
+ if(runningOnDocker){
aptlyPublishArgs += " -u http://10.99.0.1:8080"
salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=\"${aptlyPublishArgs}\""], null, true)
}
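The inline runningOnDocker check above replaces the removed @NonCPS dockerExists() helper, whose matches[position] referenced an undefined variable and therefore always threw, making the function return true unconditionally. salt.getPillar returns the Salt API envelope, roughly [return: [[<minion-id>: <pillar value>]]], so containsValue is applied to the first element of the return list; upgrade-mcp-release.groovy below adopts the same check. A hedged illustration with a made-up minion id:

    // shape assumed from the Salt HTTP API envelope; the minion id is illustrative
    def engine = ['return': [['apt01.example.local': 'docker']]]
    assert engine.get("return")[0].containsValue("docker")  // true when aptly:server:source:engine is 'docker'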
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 70256b7..fb8b0e6 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -15,23 +15,10 @@
python = new com.mirantis.mk.Python()
venvPepper = "venvPepper"
-@NonCPS
-def Boolean dockerExists() {
- def engine = salt.getPillar(venvPepper, 'I@aptly:server', "aptly:server:source:engine")
- def matches = (engine =~ /:docker/)
- try{
- def test = matches[position]
- return false
- }catch(Exception ex){
- return true
- }
-}
-
timeout(time: 12, unit: 'HOURS') {
node("python") {
try {
python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- def dockerExists = dockerExists()
stage("Update Reclass"){
common.infoMsg("Updating reclass model")
@@ -43,7 +30,18 @@
if(UPDATE_LOCAL_REPOS.toBoolean()){
stage("Update local repos"){
common.infoMsg("Updating local repositories")
- if(dockerExists){
+
+ def engine = salt.getPillar(venvPepper, 'I@aptly:server', "aptly:server:source:engine")
+ runningOnDocker = engine.get("return")[0].containsValue("docker")
+
+ if (runningOnDocker) {
+ common.infoMsg("Aptly is running as Docker container")
+ }
+ else {
+ common.infoMsg("Aptly isn't running as Docker container. Going to use aptly user for executing aptly commands")
+ }
+
+ if(runningOnDocker){
salt.cmdRun(venvPepper, 'I@aptly:server', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
}
else{
@@ -52,7 +50,7 @@
salt.enforceState(venvPepper, 'I@aptly:server', 'aptly', true)
- if(dockerExists){
+ if(runningOnDocker){
salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv"], null, true)
salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-frv -u http://10.99.0.1:8080"], null, true)
}