Merge "Adjust OpenStack upgrade pipelines"
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
deleted file mode 100644
index 2984b55..0000000
--- a/openstack-compute-upgrade.groovy
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_SUBSET_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected nodes to live apply selected package update.
- * INTERACTIVE Ask interactive questions during pipeline run (bool).
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetTestSubset
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def command
-def commandKwargs
-def probe = 1
-
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- if (TARGET_SUBSET_TEST != "") {
- targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
- } else {
- targetTestSubset = minions.join(' or ')
- }
- targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
- targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
- targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
- targetLiveAll = minions.join(' or ')
- common.infoMsg("Found nodes: ${targetLiveAll}")
- common.infoMsg("Selected test nodes: ${targetTestSubset}")
- common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
- }
-
-
- stage("Add new repos on test nodes") {
- salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
- }
-
-
- opencontrail = null
-
- try {
- opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
- print(opencontrail)
- } catch (Exception er) {
- common.infoMsg("opencontrail is not used")
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on test nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetTestSubset, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetTestSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- stage("List package upgrades") {
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample nodes') {
- input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
- }
- }
-
- stage("Add new repos on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on sample nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetLiveSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
- stage('Test upgrade on sample') {
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, args)
- } catch (Exception er) {
- print(er)
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample') {
- input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
- }
- }
-
- command = "cmd.run"
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- openvswitch = null
-
- try {
- openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
- } catch (Exception er) {
- common.infoMsg("openvswitch is not used")
- }
-
- if(openvswitch != null) {
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run salt states on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
- }
- } else {
- stage("Run salt states on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
- }
- }
-
- stage("Run Highstate on sample nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveSubset)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on all targeted nodes') {
- timeout(time: 2, unit: 'HOURS') {
- input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
- }
- }
- }
-
- stage("Add new repos on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on all targeted nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetLiveAll, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetLiveAll, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- if(openvswitch != null) {
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run salt states on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
- }
- } else {
- stage("Run salt states on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
- }
- }
-
- stage("Run Highstate on all targeted nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveAll)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
-
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 89b5e77..6a6eea2 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -1,582 +1,192 @@
/**
+ * Upgrade OpenStack packages on control plane nodes.
+ * There is no silver bullet for upgrading a cloud.
* Update packages on given nodes
*
* Expected parameters:
* SALT_MASTER_CREDENTIALS Credentials to the Salt API.
* SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
- * STAGE_TEST_UPGRADE Run test upgrade stage (bool)
- * STAGE_REAL_UPGRADE Run real upgrade stage (bool)
- * STAGE_ROLLBACK_UPGRADE Run rollback upgrade stage (bool)
- * SKIP_VM_RELAUNCH Set to true if vms should not be recreated (bool)
- * OPERATING_SYSTEM_RELEASE_UPGRADE Set to true if operating system of vms should be upgraded to newer release (bool)
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade)
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ * TARGET_SERVERS Comma-separated list of Salt compound definitions to upgrade.
* INTERACTIVE Ask interactive questions during pipeline run (bool).
*
+ * TODO:
+ * * Add OS_RELEASE_UPGRADE
**/
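+// Illustrative parameter values (hypothetical; adjust to your deployment):
+//   SALT_MASTER_URL = 'http://10.10.10.1:8000'
+//   TARGET_SERVERS  = 'ctl*'
+//   OS_DIST_UPGRADE = 'false'
+//   OS_UPGRADE      = 'true'
+//   INTERACTIVE     = 'true'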
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
-def getNodeProvider(pepperEnv, name) {
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase. Basic API and service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify that compute/neutron agents on hosts are up.
+ * Run some built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
+ 'State result': 'Basic checks around services API are passed.'
+ ])
+upgradeStageMap.put('Stop OpenStack services',
+ [
+ 'Description': 'All OpenStack python services will be stopped on all control nodes. This does not affect data plane services such as openvswitch or qemu.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack python services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Stop OpenStack python services''',
+ 'State result': 'OpenStack python services are stopped',
+ ])
+upgradeStageMap.put('Upgrade OpenStack',
+ [
+ 'Description': 'OpenStack python code will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of OpenStack packages
+ * Render new versions of configs
+ * Apply offline dbsync
+ * Start OpenStack services
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * OpenStack packages are upgraded
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected to new control plane
+'''
+ ])
+upgradeStageMap.put('Upgrade OS',
+ [
+ 'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and the node rebooted.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade, a new kernel might be installed and the node rebooted
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * System packages are updated
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected
+ * Node might be rebooted
+'''
+ ])
+
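+/**
+ * Stop OpenStack services on the given target: resolve the list of upgradable
+ * services via openstack.getOpenStackUpgradeServices() and apply each
+ * service's <service>.upgrade.service_stopped state.
+ */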
+def stopOpenStackServices(env, target) {
def salt = new com.mirantis.mk.Salt()
- def kvm = salt.getKvmMinionId(pepperEnv)
- return salt.getReturnValues(salt.getPillar(pepperEnv, "${kvm}", "salt:control:cluster:internal:node:${name}:provider"))
-}
-
-def stopServices(pepperEnv, probe, target, type) {
def openstack = new com.mirantis.mk.Openstack()
- def services = []
- if (type == 'prx') {
- services.add('keepalived')
- services.add('nginx')
- } else if (type == 'ctl') {
- services.add('keepalived')
- services.add('haproxy')
- services.add('nova')
- services.add('cinder')
- services.add('glance')
- services.add('heat')
- services.add('neutron')
- services.add('apache2')
- }
- openstack.stopServices(pepperEnv, probe, target, services)
-}
-
-def retryStateRun(pepperEnv, target, state) {
def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- try {
- salt.enforceState(pepperEnv, target, state)
- } catch (Exception e) {
- common.warningMsg("running ${state} state again")
- salt.enforceState(pepperEnv, target, state)
+
+ def services = openstack.getOpenStackUpgradeServices(env, target)
+ def st
+ for (service in services){
+ st = "${service}.upgrade.service_stopped".trim()
+ common.infoMsg("Stopping ${st} services on ${target}")
+ salt.enforceState(env, target, st)
}
}
-def stateRun(pepperEnv, target, state) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- try {
- salt.enforceState(pepperEnv, target, state)
- } catch (Exception e) {
- common.warningMsg("Some parts of ${state} state failed. We should continue to run.")
- }
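+/**
+ * Create a snapshot of a control plane VM: look up the provider node hosting
+ * the domain, shut the domain down via virt.shutdown and take a snapshot
+ * with the given name via virt.snapshot.
+ */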
+def snapshotVM(env, domain, snapshotName) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+
+ def target = salt.getNodeProvider(env, domain)
+
+ // TODO: gracefully migrate all workloads from VM, and stop it
+ salt.runSaltProcessStep(env, target, 'virt.shutdown', [domain], null, true, 3600)
+
+ //TODO: wait while VM is powered off
+
+ common.infoMsg("Creating snapshot ${snapshotName} for VM ${domain} on node ${target}")
+ salt.runSaltProcessStep(env, target, 'virt.snapshot', [domain, snapshotName], null, true, 3600)
}
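+/**
+ * Revert a control plane VM to the given snapshot on its provider node and,
+ * when ensureUp is true, start the domain again afterwards.
+ */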
+def revertSnapshotVM(env, domain, snapshotName, ensureUp=true) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
-def vcpTestUpgrade(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def test_upgrade_node = "upg01"
- salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.refresh_pillar', [], null, true, 2)
+ def target = salt.getNodeProvider(env, domain)
- stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
- stateRun(pepperEnv, 'I@salt:master', 'salt.master')
- stateRun(pepperEnv, 'I@salt:master', 'reclass')
- stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
+ common.infoMsg("Reverting snapshot ${snapshotName} for VM ${domain} on node ${target}")
+ salt.runSaltProcessStep(env, target, 'virt.revert_snapshot', [snapshotName, domain], null, true, 3600)
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
+ if (ensureUp){
+ salt.runSaltProcessStep(env, target, 'virt.start', [domain], null, true, 300)
+ }
+}
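+// Hypothetical usage of the snapshot helpers above, e.g. around a risky step:
+//   snapshotVM(env, 'ctl01.example.local', 'preUpgrade')
+//   ... perform the upgrade ...
+//   revertSnapshotVM(env, 'ctl01.example.local', 'preUpgrade')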
+
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
+ def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+ if (upgradeTargets.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
}
- def domain = salt.getDomainName(pepperEnv)
-
- def backupninja_backup_host = salt.getReturnValues(salt.getPillar(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', '_param:backupninja_backup_host'))
-
- if (SKIP_VM_RELAUNCH.toBoolean() == false) {
-
- def upgNodeProvider = getNodeProvider(pepperEnv, test_upgrade_node)
-
- salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["${test_upgrade_node}.${domain}"])
- salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["${test_upgrade_node}.${domain}"])
-
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${test_upgrade_node}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg("${test_upgrade_node}.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
- }
-
- // salt 'kvm02*' state.sls salt.control
- stateRun(pepperEnv, "${upgNodeProvider}", 'salt.control')
- // wait until upg node is registered in salt-key
- salt.minionPresent(pepperEnv, 'I@salt:master', test_upgrade_node)
- // salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.refresh_pillar', [])
- // salt '*' saltutil.sync_all
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.sync_all', [])
+ common.printStageMap(upgradeStageMap)
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
}
- stateRun(pepperEnv, "${test_upgrade_node}*", ['linux.network.proxy'])
- try {
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion.base"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- stateRun(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh'])
-
- try {
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- stateRun(pepperEnv, "${test_upgrade_node}*", ['ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['rabbitmq', 'memcached'])
- try {
- salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', ['openssh.client', 'salt.minion'])
- } catch (Exception e) {
- common.warningMsg('salt-minion was restarted. We should continue to run')
- }
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'saltutil.sync_grains')
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.flush')
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.update')
- salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja')
- try {
- salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
- } catch (Exception e) {
- common.warningMsg('salt-minion was restarted. We should continue to run')
- }
-
- salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
- salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"])
- try {
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', "arp -d ${backupninja_backup_host}")
- } catch (Exception e) {
- common.warningMsg('The ARP entry does not exist. We should continue to run.')
- }
- salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"])
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
-
- salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
- salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
-
- def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
- if(databases && databases != ""){
- def databasesList = salt.getReturnValues(databases).trim().tokenize("\n")
- for( i = 0; i < databasesList.size(); i++){
- if(databasesList[i].toLowerCase().contains('upgrade')){
- salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"])
- common.warningMsg("removing database ${databasesList[i]}")
- salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"])
- }
- }
- salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
- }else{
- common.errorMsg("No _upgrade databases were returned")
- }
-
- try {
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
- } catch (Exception e) {
- common.warningMsg('Restarting Apache2')
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
- }
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'keystone.client')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'glance')
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
-
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova') // run nova state again as sometimes nova does not enforce itself for some reason
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'cinder')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'neutron')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'heat')
-
- salt.cmdRun(pepperEnv, "${test_upgrade_node}*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
- if (INTERACTIVE.toBoolean() && STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Do you want to continue with upgrade?"
+ for (target in upgradeTargets){
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
}
}
-}
-
-def vcpRealUpgrade(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def openstack = new com.mirantis.mk.Openstack()
- def virsh = new com.mirantis.mk.Virsh()
-
- def upgrade_target = []
- upgrade_target.add('I@horizon:server')
- upgrade_target.add('I@keystone:server and not upg*')
-
- def proxy_general_target = "I@horizon:server"
- def control_general_target = "I@keystone:server and not upg*"
- def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"
-
- def snapshotName = "upgradeSnapshot1"
-
- def domain = salt.getDomainName(pepperEnv)
- def errorOccured = false
-
- for (tgt in upgrade_target) {
- def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def node = salt.getFirstMinion(pepperEnv, "${tgt}")
- def general_target = ""
-
- if (tgt.toString().contains('horizon:server')) {
- general_target = 'prx'
- } else if (tgt.toString().contains('keystone:server')) {
- general_target = 'ctl'
- }
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- stopServices(pepperEnv, node, tgt, general_target)
- }
-
- for (t in target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, t)
- if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
- sleep(2)
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"])
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
- }
- } else if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, snapshotName)
- }
- }
+ for (target in upgradeTargets) {
+ common.stageWrapper(upgradeStageMap, "Stop OpenStack services", target, interactive) {
+ stopOpenStackServices(env, target)
+ }
}
- if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
+ for (target in upgradeTargets) {
+ common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade')
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
- salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
-
- for (tgt in upgrade_target) {
- salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
}
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
}
-
- // salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.refresh_pillar', [])
- // salt '*' saltutil.sync_all
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.sync_all', [])
-
- stateRun(pepperEnv, upgrade_general_target, ['linux.network.proxy'])
- try {
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion.base"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
-
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux.system.repo'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'pkg.install', ['salt-minion'], null, true, 5)
- salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)
-
- // Apply package upgrades
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades --allow-unauthenticated -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
- common.warningMsg("Running apt dist-upgrade on ${proxy_general_target} and ${control_general_target}, this might take a while...")
- out = salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'cmd.run', [args])
- // stop services again
- def proxy_node = salt.getFirstMinion(pepperEnv, proxy_general_target)
- def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
- stopServices(pepperEnv, proxy_node, proxy_general_target, 'prx')
- stopServices(pepperEnv, control_node, control_general_target, 'ctl')
- salt.printSaltCommandResult(out)
- if (out.toString().contains("dpkg returned an error code")){
- if (INTERACTIVE.toBoolean()) {
- input message: "Apt dist-upgrade failed, please fix it manually and then click on proceed. If unable to fix it, click on abort and run the rollback stage."
- } else {
- error("Apt dist-upgrade failed. And interactive mode was disabled, failing...")
- }
- }
- // run base states
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
- } else {
- // initial VM setup
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['ntp', 'rsyslog'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['rsyslog'])
- }
-
- try {
- try {
- salt.enforceState(pepperEnv, control_general_target, ['memcached', 'keystone.server'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
- } catch (Exception e) {
- common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
- salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
- }
- // salt 'ctl01*' state.sls keystone.client
- retryStateRun(pepperEnv, "I@keystone:client and ${control_general_target}", 'keystone.client')
- retryStateRun(pepperEnv, control_general_target, 'glance')
- salt.enforceState(pepperEnv, control_general_target, 'glusterfs.client')
- salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
- retryStateRun(pepperEnv, control_general_target, 'nova')
- retryStateRun(pepperEnv, control_general_target, 'cinder')
- retryStateRun(pepperEnv, control_general_target, 'neutron')
- retryStateRun(pepperEnv, control_general_target, 'heat')
- } catch (Exception e) {
- errorOccured = true
- if (INTERACTIVE.toBoolean()){
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- input message: "Some states that require syncdb failed. Please check the reason. Click proceed only if you want to restore database into it's pre-upgrade state. If you want restore production database and also the VMs into its pre-upgrade state please click on abort and run the rollback stage."
- } else {
- input message: "Some states that require syncdb failed. Please check the reason and click proceed only if you want to restore database into it's pre-upgrade state. Otherwise, click abort."
- }
- } else {
- error("Stage Real control upgrade failed. And interactive mode was disabled, failing...")
- }
- openstack.restoreGaleraDb(pepperEnv)
- common.errorMsg("Stage Real control upgrade failed")
- }
- if(!errorOccured){
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
-
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:client and ${control_general_target}*")) {
- salt.enforceState(pepperEnv, "I@ceph:client and ${control_general_target}*", 'ceph.client')
- }
- } catch (Exception er) {
- common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
- }
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
- salt.enforceState(pepperEnv, "I@ceph:common and ${control_general_target}*", ['ceph.common', 'ceph.setup.keyring'])
- }
- } catch (Exception er) {
- common.warningMsg("Ceph common state on controllers failed. Please fix it manually")
- }
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
- salt.runSaltProcessStep(master, "I@ceph:common and ${control_general_target}*", 'service.restart', ['glance-api', 'glance-glare', 'glance-registry'])
- }
- } catch (Exception er) {
- common.warningMsg("Restarting Glance services on controllers failed. Please fix it manually")
- }
- }
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])
-
- retryStateRun(pepperEnv, proxy_general_target, 'keepalived')
- retryStateRun(pepperEnv, proxy_general_target, 'horizon')
- retryStateRun(pepperEnv, proxy_general_target, 'nginx')
- retryStateRun(pepperEnv, proxy_general_target, 'memcached')
-
- try {
- salt.enforceHighstate(pepperEnv, control_general_target)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
- }
-
- try {
- salt.enforceHighstate(pepperEnv, proxy_general_target)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
- }
-
- try {
- salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
- } catch (Exception er) {
- common.errorMsg(er)
- }
-
- /*
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- if (INTERACTIVE.toBoolean()){
- input message: "Please verify if the control upgrade was successful! If so, by clicking proceed the original VMs disk images will be backed up and snapshot will be merged to the upgraded VMs which will finalize the upgrade procedure"
- }
- node_count = 1
- for (t in proxy_target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
- node_count++
- }
- node_count = 1
- for (t in control_target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
- node_count++
- }
- if (INTERACTIVE.toBoolean()){
- input message: "Please scroll up and look for red highlighted messages containing 'virsh blockcommit' string.
- If there are any fix it manually. Otherwise click on proceed."
- }
- }
- */
- }
-}
-
-
-def vcpRollback(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def openstack = new com.mirantis.mk.Openstack()
- def virsh = new com.mirantis.mk.Virsh()
- def snapshotName = "upgradeSnapshot1"
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
- }
-
- def domain = salt.getDomainName(pepperEnv)
-
- def rollback_target = []
- rollback_target.add('I@horizon:server')
- rollback_target.add('I@keystone:server and not upg*')
-
- def control_general_target = "I@keystone:server and not upg*"
- def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"
-
- openstack.restoreGaleraDb(pepperEnv)
-
- for (tgt in rollback_target) {
- def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def node = salt.getFirstMinion(pepperEnv, "${tgt}")
- def general_target = salt.getMinionsGeneralName(pepperEnv, "${tgt}")
-
- if (tgt.toString().contains('horizon:server')) {
- general_target = 'prx'
- } else if (tgt.toString().contains('keystone:server')) {
- general_target = 'ctl'
- }
-
- for (t in target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, t)
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
- sleep(2)
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"])
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
- }
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
- } else {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "virsh define /var/lib/libvirt/images/${target}.${domain}.xml")
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
- virsh.liveSnapshotAbsent(pepperEnv, nodeProvider, target, snapshotName)
- }
- }
- }
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
- for (tgt in rollback_target) {
- salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
- }
- }
-
- salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)
-
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])
-
- def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
-
- salt.cmdRun(pepperEnv, "${control_node}*", '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
-}
-
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node() {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- if (STAGE_TEST_UPGRADE.toBoolean() == true) {
- stage('Test upgrade') {
- vcpTestUpgrade(pepperEnv)
- }
- }
-
- if (STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Real upgrade') {
- // # actual upgrade
- vcpRealUpgrade(pepperEnv)
- }
-
- if (INTERACTIVE.toBoolean() && STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click on proceed to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
- }
- }
- }
-
- if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Rollback upgrade') {
- if (INTERACTIVE.toBoolean()){
- stage('Ask for manual confirmation') {
- input message: "Before rollback please check the documentation for reclass model changes. Do you really want to continue with the rollback?"
- }
- }
- vcpRollback(pepperEnv)
- }
- }
- }
+ }
}
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
new file mode 100644
index 0000000..88bbf57
--- /dev/null
+++ b/openstack-data-upgrade.groovy
@@ -0,0 +1,185 @@
+/**
+ * Upgrade OpenStack packages on gateway nodes.
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade)
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ * TARGET_SERVERS Comma-separated list of Salt compound definitions to upgrade.
+ * INTERACTIVE Ask interactive questions during pipeline run (bool).
+ *
+ * TODO:
+ * * Add OS_RELEASE_UPGRADE
+**/
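+// Illustrative parameter values (hypothetical; adjust to your deployment):
+//   SALT_MASTER_URL = 'http://10.10.10.1:8000'
+//   TARGET_SERVERS  = 'gtw*'
+//   OS_DIST_UPGRADE = 'false'
+//   OS_UPGRADE      = 'true'
+//   INTERACTIVE     = 'true'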
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def openstack = new com.mirantis.mk.Openstack()
+def debian = new com.mirantis.mk.Debian()
+
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase. Basic API and service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify that compute/neutron agents on hosts are up.
+ * Run some built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
+ 'State result': 'Basic checks around services API are passed.'
+ ])
+upgradeStageMap.put('Upgrade pre: migrate resources',
+ [
+ 'Description': 'In order to minimize workload downtime, a smooth resource migration happens during this phase. Neutron agents on the node are set to the admin_disabled state to make sure their resources are quickly migrated to a new node (1-2 pings lost). Instances might be live-migrated from the host (this step is optional and configured from pillar).',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * Small workload downtime''',
+ 'Launched actions': '''
+ * Set neutron agents to admin disabled state
+ * Migrate instances if allowed (optional).''',
+ 'State result': '''
+ * Hosts are removed from scheduling so they will not host new resources.
+ * If instance migration was performed, no instances should be present.'''
+ ])
+upgradeStageMap.put('Upgrade OpenStack',
+ [
+ 'Description': 'OpenStack python code will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of OpenStack packages
+ * Render new versions of configs
+ * Apply offline dbsync
+ * Start OpenStack services
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * OpenStack packages are upgraded
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected to new control plane
+'''
+ ])
+upgradeStageMap.put('Upgrade OS',
+ [
+ 'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and the node rebooted.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade, a new kernel might be installed and the node rebooted
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * System packages are updated
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected
+ * Node might be rebooted
+'''
+ ])
+upgradeStageMap.put('Upgrade post: enable resources',
+ [
+ 'Description': 'Verify that agents/services on the node are up and add them back to scheduling.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Set neutron agents to admin state enabled
+ * Enable nova-compute services''',
+ 'State result': 'Hosts are added back to scheduling to host new resources',
+ ])
+upgradeStageMap.put('Post upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase, such as cleaning up old configs and temporary files, and running online dbsyncs.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Cleanup os client configs''',
+ 'State result': 'Temporary resources are being cleaned.'
+ ])
+
+
+def env = "env"
+timeout(time: 24, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def targetNodes = salt.getMinionsSorted(env, TARGET_SERVERS)
+ def migrateResources = true
+
+ if (targetNodes.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
+ }
+ if (targetNodes.size() == 1 ){
+ migrateResources = false
+ }
+
+ common.printStageMap(upgradeStageMap)
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+ }
+
+ for (target in targetNodes){
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade pre: migrate resources", target, interactive) {
+ if (migrateResources) {
+ common.infoMsg("Migrating neutron resources from ${target}")
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade.pre')
+ // Start upgrade only when resources were successfully migrated
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
+ // Stop services on the node. Do the actual step-by-step orchestration here.
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+ openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+ openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
+ }
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade post: enable resources", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade.post')
+ }
+ }
+ }
+}
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
deleted file mode 100644
index 87cf828..0000000
--- a/ovs-gateway-upgrade.groovy
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_SUBSET_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected nodes to live apply selected package update.
- * INTERACTIVE Ask interactive questions during pipeline run (bool).
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetTestSubset
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def command
-def commandKwargs
-def probe = 1
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- if (TARGET_SUBSET_TEST != "") {
- targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
- } else {
- targetTestSubset = minions.join(' or ')
- }
- targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
- targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
- targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
- targetLiveAll = minions.join(' or ')
- common.infoMsg("Found nodes: ${targetLiveAll}")
- common.infoMsg("Selected test nodes: ${targetTestSubset}")
- common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
- }
-
-
- stage("Add new repos on test nodes") {
- salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
- }
-
- stage("List package upgrades") {
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample nodes') {
- input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
- }
- }
-
- stage("Add new repos on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
- }
-
- args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
- stage('Test upgrade on sample') {
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, args)
- } catch (Exception er) {
- print(er)
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample') {
- input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
- }
- }
-
- command = "cmd.run"
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run Neutron state on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
- }
-
- stage("Run Highstate on sample nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveSubset)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on all targeted nodes') {
- timeout(time: 2, unit: 'HOURS') {
- input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
- }
- }
- }
-
- stage("Add new repos on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
- }
-
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run Neutron state on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
- }
-
- stage("Run Highstate on all targeted nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveAll)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}