/**
 *
 * Upgrade Ceph mon/mgr/osd/rgw/client
 *
 * Required parameters:
 *  SALT_MASTER_URL                 URL of Salt master
 *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
 *
 *  ADMIN_HOST                      Host (minion id) with admin keyring and /etc/crushmap file present
 *  CLUSTER_FLAGS                   Comma separated list of flags to apply to cluster
 *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stopping daemons
 *  ORIGIN_RELEASE                  Ceph release version before upgrade
 *  TARGET_RELEASE                  Ceph release version after upgrade
 *  STAGE_UPGRADE_MON               Set to True if Ceph mon nodes upgrade is desired
 *  STAGE_UPGRADE_MGR               Set to True if Ceph mgr nodes upgrade or new deploy is desired
 *  STAGE_UPGRADE_OSD               Set to True if Ceph osd nodes upgrade is desired
 *  STAGE_UPGRADE_RGW               Set to True if Ceph rgw nodes upgrade is desired
 *  STAGE_UPGRADE_CLIENT            Set to True if Ceph client nodes upgrade is desired (for example ctl/cmp nodes)
 *  BACKUP_ENABLED                  Set to True if backups of mon/rgw VMs and OSD nodes are desired before upgrade
 *  STAGE_FINALIZE                  Set to True to finalize the upgrade (require-osd-release, min-compat-client, crush tunables)
 *
 */
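
/**
 * Illustrative example values (an assumption for clarity, not taken from any
 * particular deployment; adjust to your environment):
 *
 *  ADMIN_HOST      = cmn01*
 *  CLUSTER_FLAGS   = noout,norebalance
 *  ORIGIN_RELEASE  = jewel
 *  TARGET_RELEASE  = luminous
 */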

common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()

def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')

def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

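// Poll 'ceph health' on ADMIN_HOST every 10 seconds until HEALTH_OK is
// reported or 'attempts' polls have passed; on timeout it simply returns
// without failing the build (best effort).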
def waitForHealthy(master, count=0, attempts=300) {
    // wait for healthy cluster
    while (count < attempts) {
        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            break
        }
        count++
        sleep(10)
    }
}

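// Back up Ceph nodes before the upgrade. OSD nodes are backed up via the
// ceph.backup Salt state plus the backup runner script; mon/rgw nodes are
// VMs, so their libvirt system disk is copied to
// /root/<minion>.<domain>.qcow2.bak on the KVM provider host, one node at a
// time, waiting for cluster health before and after each node.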
def backup(master, target) {
    stage("backup ${target}") {

        if (target == 'osd') {
            try {
                salt.enforceState(master, "I@ceph:${target}", "ceph.backup", true)
                runCephCommand(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
            } catch (Exception e) {
                common.errorMsg(e)
                common.errorMsg("Make sure Ceph backup on OSD nodes is enabled")
                throw new InterruptedException()
            }
        } else {
            def _pillar = salt.getGrain(master, 'I@salt:master', 'domain')
            def domain = _pillar['return'][0].values()[0].values()[0]

            def kvm_pillar = salt.getGrain(master, 'I@salt:control', 'id')
            def kvm01 = kvm_pillar['return'][0].values()[0].values()[0]

            def target_pillar = salt.getGrain(master, "I@ceph:${target}", 'host')
            def minions = target_pillar['return'][0].values()
            for (minion in minions) {
                def minion_name = minion.values()[0]
                def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
                def minionProvider = provider_pillar['return'][0].values()[0]

                waitForHealthy(master)
                try {
                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                } catch (Exception e) {
                    common.warningMsg('Backup already exists')
                }
                try {
                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
                } catch (Exception e) {
                    common.warningMsg('Backup already exists')
                }
                try {
                    salt.cmdRun(master, "${minionProvider}", "virsh start ${minion_name}.${domain}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
                salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
                waitForHealthy(master)
            }
        }
    }
    return
}

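// Upgrade one Ceph role. Repos are switched on all minions of the role
// first; then, except for the 'common' (client) case, each minion is
// upgraded one at a time: install packages, restart the ceph-<target>
// systemd target, print 'ceph -s' for verification, and wait for manual
// confirmation before moving on to the next node.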
def upgrade(master, target) {

    stage("Change ${target} repos") {
        salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
        salt.enforceState(master, "I@ceph:${target}", 'linux.system.repo', true)
    }
    if (target == 'mgr') {
        stage('Run ceph mgr state') {
            salt.enforceState(master, "I@ceph:mgr", "ceph.mgr", true)
        }
    }
    if (target == 'common') {
        stage('Upgrade ceph-common pkgs') {
            runCephCommand(master, "I@ceph:${target}", "apt install ceph-${target} -y")
        }
    } else {
        minions = salt.getMinions(master, "I@ceph:${target}")

        for (minion in minions) {
            // upgrade pkgs
            if (target == 'radosgw') {
                stage('Upgrade radosgw pkgs') {
                    runCephCommand(master, "I@ceph:${target}", "apt install ${target} -y")
                }
            } else {
                stage("Upgrade ${target} pkgs on ${minion}") {
                    runCephCommand(master, "${minion}", "apt install ceph-${target} -y")
                }
            }
            // restart services
            stage("Restart ${target} services on ${minion}") {
                runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
            }

            stage("Verify services for ${minion}") {
                sleep(10)
                runCephCommand(master, ADMIN_HOST, "ceph -s")
            }

            stage('Ask for manual confirmation') {
                input message: "From the verification command above, please check that Ceph ${target} joined the cluster correctly. If so, do you want to continue upgrading the next node?"
            }
        }
    }
    runCephCommand(master, ADMIN_HOST, "ceph versions")
    sleep(5)
    return
}
timeout(time: 12, unit: 'HOURS') {
    node("python") {

        // create connection to salt master
        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

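        // Ceph components must be upgraded in order (mon -> mgr -> osd ->
        // rgw/clients), so refuse to upgrade RGW while mon/mgr/osd still
        // report the origin release.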
        stage('Check user choices') {
            if (STAGE_UPGRADE_RGW.toBoolean() == true) {
                // if rgw is selected, check that the other components already run the target version
                def mon_ok = true
                if (STAGE_UPGRADE_MON.toBoolean() == false) {
                    def mon_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
                    mon_ok = mon_v.contains("${TARGET_RELEASE}") && !mon_v.contains("${ORIGIN_RELEASE}")
                }
                def mgr_ok = true
                if (STAGE_UPGRADE_MGR.toBoolean() == false) {
                    def mgr_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
                    mgr_ok = mgr_v.contains("${TARGET_RELEASE}") && !mgr_v.contains("${ORIGIN_RELEASE}")
                }
                def osd_ok = true
                if (STAGE_UPGRADE_OSD.toBoolean() == false) {
                    def osd_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
                    osd_ok = osd_v.contains("${TARGET_RELEASE}") && !osd_v.contains("${ORIGIN_RELEASE}")
                }
                if (!mon_ok || !osd_ok || !mgr_ok) {
                    common.errorMsg('You may choose stages in any order, but RGW should be upgraded last')
                    throw new InterruptedException()
                }
            }
        }

        if (BACKUP_ENABLED.toBoolean() == true) {
            if (STAGE_UPGRADE_MON.toBoolean() == true) {
                backup(pepperEnv, 'mon')
            }
            if (STAGE_UPGRADE_RGW.toBoolean() == true) {
                backup(pepperEnv, 'radosgw')
            }
            if (STAGE_UPGRADE_OSD.toBoolean() == true) {
                backup(pepperEnv, 'osd')
            }
        }

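        // Flags such as noout/norebalance keep the cluster from marking OSDs
        // out and rebalancing data while daemons are restarted during the upgrade.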
        if (flags.size() > 0) {
            stage('Set cluster flags') {
                for (flag in flags) {
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                }
            }
        }

        if (STAGE_UPGRADE_MON.toBoolean() == true) {
            upgrade(pepperEnv, 'mon')
        }

        if (STAGE_UPGRADE_MGR.toBoolean() == true) {
            upgrade(pepperEnv, 'mgr')
        }

        if (STAGE_UPGRADE_OSD.toBoolean() == true) {
            upgrade(pepperEnv, 'osd')
        }

        if (STAGE_UPGRADE_RGW.toBoolean() == true) {
            upgrade(pepperEnv, 'radosgw')
        }

        if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
            upgrade(pepperEnv, 'common')
        }

        // remove cluster flags (sortbitwise must stay set once enabled)
        if (flags.size() > 0) {
            stage('Unset cluster flags') {
                for (flag in flags) {
                    if (!flag.contains('sortbitwise')) {
                        common.infoMsg('Removing flag ' + flag)
                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                    }
                }
            }
        }

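        // Finalization: forbid pre-upgrade OSDs from joining, pin the minimum
        // client compatibility to the origin release, and switch to optimal
        // crush tunables. Note that 'ceph osd crush tunables optimal' may
        // trigger significant data movement.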
        if (STAGE_FINALIZE.toBoolean() == true) {
            stage("Finalize ceph version upgrade") {
                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
                try {
                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
                try {
                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
            }
        }

        // wait for healthy cluster
        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
            waitForHealthy(pepperEnv)
        }
    }
}