/**
 * Update packages on given server nodes
 *
 * Expected parameters:
 *   DRIVE_TRAIN_PARAMS                  Yaml, DriveTrain related params:
 *     SALT_MASTER_CREDENTIALS             Credentials to the Salt API
 *     SALT_MASTER_URL                     Full Salt API address [https://10.10.10.1:8000]
 *     IGNORE_SERVER_STATUS                Do not validate server availability/status before update
 *     IGNORE_NON_REPLICATED_VOLUMES       Update GlusterFS even if there are non-replicated volumes
 *     TARGET_SERVERS                      Salt compound target to match nodes to be updated [*, G@osfamily:debian]
 */
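// Illustrative DRIVE_TRAIN_PARAMS payload; the values below are examples only, not defaults:
//   SALT_MASTER_URL: "https://10.10.10.1:8000"
//   SALT_MASTER_CREDENTIALS: "salt-api-credentials"
//   IGNORE_SERVER_STATUS: "false"
//   IGNORE_NON_REPLICATED_VOLUMES: "false"
//   TARGET_SERVERS: "G@osfamily:debian"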

def pEnv = "pepperEnv"
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
def python = new com.mirantis.mk.Python()
def pkg_name = 'glusterfs-server'

// Convert parameters from yaml to env variables
params = readYaml text: env.DRIVE_TRAIN_PARAMS
for (key in params.keySet()) {
    value = params[key]
    env.setProperty(key, value)
}

/**
 * - choose only those hosts where an update is available
 * - validate that all servers are in a normal working state (can be skipped with IGNORE_SERVER_STATUS)
 * - validate that all volumes are replicated; if there is a non-replicated volume, stop the update
 *   (can be skipped with IGNORE_NON_REPLICATED_VOLUMES)
 * - run the update state on one server at a time
 */

timeout(time: 12, unit: 'HOURS') {
    node() {
        try {

            stage('Setup virtualenv for Pepper') {
                python.setupPepperVirtualenv(pEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
            }

            stage('List target servers') {
                all_minions = salt.getMinions(pEnv, TARGET_SERVERS)

                if (all_minions.isEmpty()) {
                    throw new Exception("No minion was targeted")
                }
                minions = []
                for (minion in all_minions) {
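                    // Note on the check below: pkg.latest_version with show_installed=True returns the
                    // currently installed version when the package is already up to date, so it only
                    // differs from pkg.version when an upgrade candidate exists.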
                    latest_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.latest_version', [pkg_name, 'show_installed=True'])).split('\n')[0]
                    current_version = salt.getReturnValues(salt.runSaltProcessStep(pEnv, minion, 'pkg.version', [pkg_name])).split('\n')[0]
                    if (latest_version != current_version) {
                        minions.add(minion)
                    } else {
                        common.infoMsg("${pkg_name} has already been upgraded or a newer version is not available on ${minion}. Skipping upgrade")
                    }
                }
            }
            if (!minions.isEmpty()) {
                if (!IGNORE_SERVER_STATUS.toBoolean()) {
                    stage('Validate servers availability') {
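                        // The check below verifies that the localhost entry of `gluster pool list`
                        // reports 'Connected' on every targeted server; illustrative output
                        // (UUIDs/hostnames are examples only):
                        //   UUID                                  Hostname    State
                        //   6a9d1f1e-0000-0000-0000-000000000000  localhost   Connected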
                        salt.commandStatus(pEnv, TARGET_SERVERS, "gluster pool list | fgrep localhost", 'Connected', true, true, null, true, 1)
                        common.successMsg("All servers are available")
                    }
                } else {
                    common.warningMsg("Check of server availability has been disabled")
                }
                if (!IGNORE_NON_REPLICATED_VOLUMES.toBoolean()) {
                    stage('Check that all volumes are replicated') {
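                        // The check below fails if `gluster volume info` reports any volume whose
                        // 'Type:' is not Replicate (for example 'Type: Distribute'), since updating a
                        // node that serves a non-replicated volume risks data unavailability or loss.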
                        salt.commandStatus(pEnv, TARGET_SERVERS, "gluster volume info | fgrep 'Type:' | fgrep -v Replicate", null, false, true, null, true, 1)
                        common.successMsg("All volumes are replicated")
                    }
                } else {
                    common.warningMsg("Check of volumes' replication has been disabled. Be aware, you may lose data during the update!")
                }
                // Actual update, applied to one server at a time
                for (tgt in minions) {
                    stage("Update glusterfs on ${tgt}") {
                        salt.runSaltProcessStep(pEnv, tgt, 'state.apply', ['glusterfs.update.server'])
                    }
                }
            } else {
                common.warningMsg("No hosts to update glusterfs on")
            }
        } catch (Throwable e) {
            // If there was an error or exception thrown, the build failed
            currentBuild.result = "FAILURE"
            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
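            // Best-effort recovery: re-apply the base glusterfs state on all targeted servers
            // before failing the build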
            salt.runSaltProcessStep(pEnv, TARGET_SERVERS, 'state.apply', ['glusterfs'])
            throw e
        }
    }
}