/**
 * Update packages on given nodes
 *
 * Expected parameters:
 *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
 *   SALT_MASTER_URL                    Full Salt API address [http://10.10.10.1:8000].
 *   STAGE_TEST_UPGRADE                 Run test upgrade stage (bool)
 *   STAGE_REAL_UPGRADE                 Run real upgrade stage (bool)
 *   STAGE_ROLLBACK_UPGRADE             Run rollback upgrade stage (bool)
 *   SKIP_VM_RELAUNCH                   Set to true if the VMs should not be recreated (bool)
 *   OPERATING_SYSTEM_RELEASE_UPGRADE   Set to true if the operating system of the VMs should be upgraded to a newer release (bool)
 *
 **/

def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()

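// Stop the control plane services on the given target before packages are upgraded.
// For proxy (prx) nodes only keepalived and nginx are stopped; for controller (ctl)
// nodes the OpenStack services are stopped as well. The 'probe' node is passed to
// openstack.stopServices(), presumably so it can detect which of the listed services exist.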
def stopServices(pepperEnv, probe, target, type) {
    def openstack = new com.mirantis.mk.Openstack()
    def services = []
    if (type == 'prx') {
        services.add('keepalived')
        services.add('nginx')
    } else if (type == 'ctl') {
        services.add('keepalived')
        services.add('haproxy')
        services.add('nova')
        services.add('cinder')
        services.add('glance')
        services.add('heat')
        services.add('neutron')
        services.add('apache2')
    }
    openstack.stopServices(pepperEnv, probe, target, services)
}

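// Enforce a Salt state and retry it once if the first run fails.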
def retryStateRun(pepperEnv, target, state) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()
    try {
        salt.enforceState(pepperEnv, target, state)
    } catch (Exception e) {
        common.warningMsg("running ${state} state again")
        salt.enforceState(pepperEnv, target, state)
    }
}

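// Enforce a Salt state and only warn (instead of failing the build) if parts of it fail.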
def stateRun(pepperEnv, target, state) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()
    try {
        salt.enforceState(pepperEnv, target, state)
    } catch (Exception e) {
        common.warningMsg("Some parts of ${state} state failed. We should continue to run.")
    }
}


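// Test upgrade stage: (re)create the upg01 test VM, back up the databases and deploy the
// upgraded control plane services on upg01 to verify the upgrade procedure before it is
// applied to the production controllers and proxies.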
def vcpTestUpgrade(pepperEnv) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()
    def test_upgrade_node = "upg01"
    salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.refresh_pillar', [], null, true, 2)

    stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
    stateRun(pepperEnv, 'I@salt:master', 'salt.master')
    stateRun(pepperEnv, 'I@salt:master', 'reclass')
    stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')

    try {
        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
    } catch (Exception e) {
        common.warningMsg("No response from some minions. We should continue to run")
    }

    try {
        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true, 2)
    } catch (Exception e) {
        common.warningMsg("No response from some minions. We should continue to run")
    }

    def domain = salt.getDomainName(pepperEnv)

    def backupninja_backup_host = salt.getReturnValues(salt.getPillar(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', '_param:backupninja_backup_host'))

    if (SKIP_VM_RELAUNCH.toBoolean() == false) {

        def upgNodeProvider = salt.getNodeProvider(pepperEnv, test_upgrade_node)

        salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["${test_upgrade_node}.${domain}"])
        salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["${test_upgrade_node}.${domain}"])

        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${test_upgrade_node}.${domain} -y")
        } catch (Exception e) {
            common.warningMsg("${test_upgrade_node}.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
        }

        // salt 'kvm02*' state.sls salt.control
        salt.enforceState(pepperEnv, "${upgNodeProvider}", 'salt.control')
        // wait until upg node is registered in salt-key
        salt.minionPresent(pepperEnv, 'I@salt:master', test_upgrade_node)
        // salt '*' saltutil.refresh_pillar
        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.refresh_pillar', [])
        // salt '*' saltutil.sync_all
        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.sync_all', [])
    }

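    // Apply the base states to the test node, back up the databases (backupninja MySQL and
    // rsync runs plus a fresh xtrabackup), drop any leftover *upgrade* databases and then
    // deploy the upgraded OpenStack services (keystone, glance, nova, cinder, neutron, heat)
    // on upg01.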
    stateRun(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh'])

    try {
        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
    } catch (Exception e) {
        common.warningMsg(e)
    }
    stateRun(pepperEnv, "${test_upgrade_node}*", ['ntp', 'rsyslog'])
    salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
    salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['rabbitmq', 'memcached'])
    try {
        salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', ['openssh.client', 'salt.minion'])
    } catch (Exception e) {
        common.warningMsg('salt-minion was restarted. We should continue to run')
    }
    try {
        salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
    } catch (Exception e) {
        common.warningMsg('salt-minion was restarted. We should continue to run')
    }
    // salt '*' state.apply salt.minion.grains
    //salt.enforceState(pepperEnv, '*', 'salt.minion.grains')
    // salt -C 'I@backupninja:server' state.sls backupninja
    salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
    salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja')
    salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"])
    try {
        salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', "arp -d ${backupninja_backup_host}")
    } catch (Exception e) {
        common.warningMsg('The ARP entry does not exist. We should continue to run.')
    }
    salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"])
    salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
    salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')

    salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
    salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
    salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")

    def databases = salt.cmdRun(pepperEnv, 'I@mysql:client', 'salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
    if (databases && databases != "") {
        def databasesList = salt.getReturnValues(databases).trim().tokenize("\n")
        for (i = 0; i < databasesList.size(); i++) {
            if (databasesList[i].toLowerCase().contains('upgrade')) {
                salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"])
                common.warningMsg("removing database ${databasesList[i]}")
                salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"])
            }
        }
        salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
    } else {
        common.errorMsg("No _upgrade databases were returned")
    }

    try {
        salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
    } catch (Exception e) {
        common.warningMsg('Restarting Apache2')
        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
    }
    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'keystone.client')
    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'glance')
    salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')

    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova')
    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova') // run the nova state again, as sometimes it does not fully enforce on the first run
    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'cinder')
    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'neutron')
    retryStateRun(pepperEnv, "${test_upgrade_node}*", 'heat')

    salt.cmdRun(pepperEnv, "${test_upgrade_node}*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')

    if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
        stage('Ask for manual confirmation') {
            input message: "Do you want to continue with the upgrade?"
        }
    }
}


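// Real upgrade stage: upgrade the proxy (horizon) and controller (keystone) VMs.
// Depending on OPERATING_SYSTEM_RELEASE_UPGRADE the VMs are either destroyed and
// re-created from scratch (keeping a qcow2 backup of the old disks) or live-snapshotted
// and dist-upgraded in place, after which the OpenStack states are re-applied.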
def vcpRealUpgrade(pepperEnv) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()
    def openstack = new com.mirantis.mk.Openstack()
    def virsh = new com.mirantis.mk.Virsh()

    def upgrade_target = []
    upgrade_target.add('I@horizon:server')
    upgrade_target.add('I@keystone:server and not upg*')

    def proxy_general_target = "I@horizon:server"
    def control_general_target = "I@keystone:server and not upg*"
    def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"

    def snapshotName = "upgradeSnapshot1"

    def domain = salt.getDomainName(pepperEnv)
    def errorOccured = false

    for (tgt in upgrade_target) {
        def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
        def node = salt.getFirstMinion(pepperEnv, "${tgt}")
        def general_target = ""

        if (tgt.toString().contains('horizon:server')) {
            general_target = 'prx'
        } else if (tgt.toString().contains('keystone:server')) {
            general_target = 'ctl'
        }

        if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
            stopServices(pepperEnv, node, tgt, general_target)
        }

        def node_count = 1
        for (t in target_hosts) {
            def target = salt.stripDomainName(t)
            def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
            if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
                sleep(2)
                try {
                    salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
                } catch (Exception e) {
                    common.warningMsg('File already exists')
                }
                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"])
                try {
                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
                } catch (Exception e) {
                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
                }
            } else if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
                virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, snapshotName)
            }
            node_count++
        }
    }
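
    // If the VMs were destroyed for an OS release upgrade, take a fresh database backup,
    // re-create the VMs via salt.control and wait for the new minions to register.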
    if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
        salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")

        salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')

        for (tgt in upgrade_target) {
            salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
        }
    }

    // salt '*' saltutil.refresh_pillar
    salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.refresh_pillar', [])
    // salt '*' saltutil.sync_all
    salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.sync_all', [])

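    // Without an OS release upgrade the existing VMs are dist-upgraded in place;
    // otherwise the freshly re-created VMs are provisioned from scratch with the base states.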
    if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {

        try {
            salt.enforceState(pepperEnv, upgrade_general_target, ['linux.system.repo'])
        } catch (Exception e) {
            common.warningMsg(e)
        }

        salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'pkg.install', ['salt-minion'], null, true, 5)
        salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)

        // Apply package upgrades
        args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades --allow-unauthenticated -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
        common.warningMsg("Running apt dist-upgrade on ${proxy_general_target} and ${control_general_target}, this might take a while...")
        out = salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'cmd.run', [args])
        // stop services again
        def proxy_node = salt.getFirstMinion(pepperEnv, proxy_general_target)
        def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
        stopServices(pepperEnv, proxy_node, proxy_general_target, 'prx')
        stopServices(pepperEnv, control_node, control_general_target, 'ctl')
        salt.printSaltCommandResult(out)
        if (out.toString().contains("dpkg returned an error code")) {
            input message: "Apt dist-upgrade failed, please fix it manually and then click on proceed. If unable to fix it, click on abort and run the rollback stage."
        }
        // run base states
        try {
            salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
        } catch (Exception e) {
            common.warningMsg(e)
        }
        salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
    } else {
        // initial VM setup
        try {
            salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh'])
        } catch (Exception e) {
            common.warningMsg(e)
        }
        try {
            salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
        } catch (Exception e) {
            common.warningMsg(e)
        }
        try {
            salt.enforceState(pepperEnv, upgrade_general_target, ['ntp', 'rsyslog'])
        } catch (Exception e) {
            common.warningMsg(e)
        }
        salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
        salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
        salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['rsyslog'])
    }
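
    // Re-apply the OpenStack control plane states. If any state that requires a database
    // syncdb fails, pause for confirmation and restore the Galera database from backup.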
    try {
        try {
            salt.enforceState(pepperEnv, control_general_target, ['memcached', 'keystone.server'])
            salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
        } catch (Exception e) {
            common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
            salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
            salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
        }
        // salt 'ctl01*' state.sls keystone.client
        retryStateRun(pepperEnv, "I@keystone:client and ${control_general_target}", 'keystone.client')
        retryStateRun(pepperEnv, control_general_target, 'glance')
        salt.enforceState(pepperEnv, control_general_target, 'glusterfs.client')
        salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
        retryStateRun(pepperEnv, control_general_target, 'nova')
        retryStateRun(pepperEnv, control_general_target, 'cinder')
        retryStateRun(pepperEnv, control_general_target, 'neutron')
        retryStateRun(pepperEnv, control_general_target, 'heat')
    } catch (Exception e) {
        errorOccured = true
        if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
            input message: "Some states that require syncdb failed. Please check the reason. Click proceed only if you want to restore the database into its pre-upgrade state. If you want to restore the production database and also the VMs into their pre-upgrade state, please click on abort and run the rollback stage."
        } else {
            input message: "Some states that require syncdb failed. Please check the reason and click proceed only if you want to restore the database into its pre-upgrade state. Otherwise, click abort."
        }
        openstack.restoreGaleraDb(pepperEnv)
        common.errorMsg("Stage Real control upgrade failed")
    }
    if (!errorOccured) {

        if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {

            try {
                if (salt.testTarget(pepperEnv, "I@ceph:client and ${control_general_target}*")) {
                    salt.enforceState(pepperEnv, "I@ceph:client and ${control_general_target}*", 'ceph.client')
                }
            } catch (Exception er) {
                common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
            }
            try {
                if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
                    salt.enforceState(pepperEnv, "I@ceph:common and ${control_general_target}*", ['ceph.common', 'ceph.setup.keyring'])
                }
            } catch (Exception er) {
                common.warningMsg("Ceph common state on controllers failed. Please fix it manually")
            }
            try {
                if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
                    salt.runSaltProcessStep(pepperEnv, "I@ceph:common and ${control_general_target}*", 'service.restart', ['glance-api', 'glance-glare', 'glance-registry'])
                }
            } catch (Exception er) {
                common.warningMsg("Restarting Glance services on controllers failed. Please fix it manually")
            }
        }

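        // Restart the nova services, converge the proxy nodes, run highstate on controllers
        // and proxies and finish with a basic OpenStack API smoke check.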
        // salt 'cmp*' cmd.run 'service nova-compute restart'
        salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
        salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
        salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])

        retryStateRun(pepperEnv, proxy_general_target, 'keepalived')
        retryStateRun(pepperEnv, proxy_general_target, 'horizon')
        retryStateRun(pepperEnv, proxy_general_target, 'nginx')
        retryStateRun(pepperEnv, proxy_general_target, 'memcached')

        try {
            salt.enforceHighstate(pepperEnv, control_general_target)
        } catch (Exception er) {
            common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
        }

        try {
            salt.enforceHighstate(pepperEnv, proxy_general_target)
        } catch (Exception er) {
            common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
        }

        try {
            salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
        } catch (Exception er) {
            common.errorMsg(er)
        }

        /*
        if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
            input message: "Please verify if the control upgrade was successful! If so, by clicking proceed the original VM disk images will be backed up and the snapshot will be merged into the upgraded VMs, which will finalize the upgrade procedure"
            node_count = 1
            for (t in proxy_target_hosts) {
                def target = salt.stripDomainName(t)
                def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
                try {
                    salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
                } catch (Exception e) {
                    common.warningMsg('File already exists')
                }
                virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
                node_count++
            }
            node_count = 1
            for (t in control_target_hosts) {
                def target = salt.stripDomainName(t)
                def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
                try {
                    salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
                } catch (Exception e) {
                    common.warningMsg('File already exists')
                }
                virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
                node_count++
            }
            input message: "Please scroll up and look for red highlighted messages containing the 'virsh blockcommit' string. If there are any, fix them manually. Otherwise click on proceed."
        }
        */
    }
}


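// Rollback stage: restore the Galera database from backup and roll the proxy and controller
// VMs back, either by restoring the saved qcow2 disk backups (when the OS release was
// upgraded) or by redefining the original domains and dropping the pre-upgrade live snapshot.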
def vcpRollback(pepperEnv) {
    def common = new com.mirantis.mk.Common()
    def salt = new com.mirantis.mk.Salt()
    def openstack = new com.mirantis.mk.Openstack()
    def virsh = new com.mirantis.mk.Virsh()
    def snapshotName = "upgradeSnapshot1"
    try {
        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
    } catch (Exception e) {
        common.warningMsg("No response from some minions. We should continue to run")
    }

    def domain = salt.getDomainName(pepperEnv)

    def rollback_target = []
    rollback_target.add('I@horizon:server')
    rollback_target.add('I@keystone:server and not upg*')

    def control_general_target = "I@keystone:server and not upg*"
    def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"

    openstack.restoreGaleraDb(pepperEnv)

    for (tgt in rollback_target) {
        def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
        def node = salt.getFirstMinion(pepperEnv, "${tgt}")
        def general_target = salt.getMinionsGeneralName(pepperEnv, "${tgt}")

        if (tgt.toString().contains('horizon:server')) {
            general_target = 'prx'
        } else if (tgt.toString().contains('keystone:server')) {
            general_target = 'ctl'
        }

        def node_count = 1
        for (t in target_hosts) {
            def target = salt.stripDomainName(t)
            def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
            salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
            sleep(2)
            if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"])
                try {
                    salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
                } catch (Exception e) {
                    common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
                }
                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
            } else {
                salt.cmdRun(pepperEnv, "${nodeProvider}", "virsh define /var/lib/libvirt/images/${target}.${domain}.xml")
                salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
                virsh.liveSnapshotAbsent(pepperEnv, nodeProvider, target, snapshotName)
            }
            node_count++
        }
    }

    // salt 'cmp*' cmd.run 'service nova-compute restart'
    salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])

    if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
        for (tgt in rollback_target) {
            salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
        }
    }

    salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)

    salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
    salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])

    def control_node = salt.getFirstMinion(pepperEnv, control_general_target)

    salt.cmdRun(pepperEnv, "${control_node}*", '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
}


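// Main pipeline: set up the Pepper virtualenv for the Salt API and run the requested
// stages (test upgrade, real upgrade with an optional rollback confirmation, rollback).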
def pepperEnv = "pepperEnv"
timeout(time: 12, unit: 'HOURS') {
    node() {

        stage('Setup virtualenv for Pepper') {
            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
        }

        if (STAGE_TEST_UPGRADE.toBoolean() == true) {
            stage('Test upgrade') {
                vcpTestUpgrade(pepperEnv)
            }
        }

        if (STAGE_REAL_UPGRADE.toBoolean() == true) {
            stage('Real upgrade') {
                // actual upgrade
                vcpRealUpgrade(pepperEnv)
            }

            if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
                stage('Ask for manual confirmation') {
                    input message: "Please verify whether the control upgrade was successful. If it was not, as a last resort you can click on proceed to continue with the control upgrade rollback. Do you want to continue with the rollback?"
                }
            }
        }

        if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
            stage('Rollback upgrade') {
                stage('Ask for manual confirmation') {
                    input message: "Before the rollback, please check the documentation for reclass model changes. Do you really want to continue with the rollback?"
                }
                vcpRollback(pepperEnv)
            }
        }
    }
}