blob: cc8a84d15900231a74b8d7e18a7cd54ec5aef468 [file] [log] [blame]
Jiri Broulikdc87d722017-11-03 15:43:22 +01001/**
2 *
3 * Upgrade Ceph mon/mgr/osd/rgw/client
4 *
5 * Required parameters:
6 * SALT_MASTER_URL URL of Salt master
7 * SALT_MASTER_CREDENTIALS Credentials to the Salt API
8 *
9 * ADMIN_HOST Host (minion id) with admin keyring and /etc/crushmap file present
10 * CLUSTER_FLAGS Comma separated list of tags to apply to cluster
11 * WAIT_FOR_HEALTHY             Wait for cluster rebalance before stopping daemons
12 * ORIGIN_RELEASE Ceph release version before upgrade
13 * TARGET_RELEASE Ceph release version after upgrade
14 * STAGE_UPGRADE_MON Set to True if Ceph mon nodes upgrade is desired
15 * STAGE_UPGRADE_MGR Set to True if Ceph mgr nodes upgrade or new deploy is desired
16 * STAGE_UPGRADE_OSD Set to True if Ceph osd nodes upgrade is desired
17 * STAGE_UPGRADE_RGW Set to True if Ceph rgw nodes upgrade is desired
18 * STAGE_UPGRADE_CLIENT Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
Michael Vollmanafe91522019-05-07 08:10:00 -040019 * STAGE_FINALIZE Set to True if configs recommended for TARGET_RELEASE should be set after upgrade is done
20 * BACKUP_ENABLED Select to copy the disks of Ceph VMs before upgrade and backup Ceph directories on OSD nodes
21 * BACKUP_DIR Select the target dir to backup to when BACKUP_ENABLED
Jiri Broulikdc87d722017-11-03 15:43:22 +010022 *
23 */
24
// Shared Mirantis pipeline-library helpers. Deliberately bound without 'def'
// so they are script-global and visible inside the functions defined below.
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
// Only used in the node block below, so local is fine here.
def python = new com.mirantis.mk.Python()

// Name of the pepper (salt-api) virtualenv created by setupPepperVirtualenv
// in the node block; passed as the connection handle to every salt call.
def pepperEnv = "pepperEnv"
// Cluster flags (e.g. noout,norebalance) parsed from the comma-separated
// CLUSTER_FLAGS job parameter; set before the upgrade, unset after.
def flags = CLUSTER_FLAGS.tokenize(',')
31
/**
 * Execute a shell command on the targeted minion(s) via salt cmd.run.
 *
 * @param master pepper/salt connection handle
 * @param target minion id or compound target (e.g. "I@ceph:osd")
 * @param cmd    shell command line to run
 * @return       raw salt cmd.run result structure
 */
def runCephCommand(master, target, cmd) {
    def result = salt.cmdRun(master, target, cmd)
    return result
}
35
Jiri Broulik96c867a2017-11-07 16:14:10 +010036def waitForHealthy(master, count=0, attempts=300) {
37 // wait for healthy cluster
38 while (count<attempts) {
39 def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
40 if (health.contains('HEALTH_OK')) {
41 common.infoMsg('Cluster is healthy')
42 break;
43 }
44 count++
45 sleep(10)
46 }
47}
48
/**
 * Back up Ceph nodes of the given role before upgrading them.
 *
 * For 'osd': runs the ceph.backup salt state and the backup-runner script
 * on the OSD nodes themselves (file-level backup of Ceph directories).
 *
 * For any other role (mon/radosgw): the nodes are assumed to be KVM VMs
 * managed by salt:control; each VM is shut down, its system.qcow2 disk is
 * copied to BACKUP_DIR on the hosting hypervisor, and the VM is started
 * again — one node at a time, waiting for cluster health around each.
 *
 * @param master pepper/salt connection handle
 * @param target ceph pillar role: 'osd', 'mon' or 'radosgw'
 */
def backup(master, target) {
    stage("backup ${target}") {

        if (target == 'osd') {
            try {
                salt.enforceState(master, "I@ceph:${target}", "ceph.backup", true)
                runCephCommand(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
            } catch (Exception e) {
                common.errorMsg(e)
                common.errorMsg("Make sure Ceph backup on OSD nodes is enabled")
                // abort the whole pipeline — upgrading without a backup is not acceptable
                throw new InterruptedException()
            }
        } else {
            // domain suffix of the environment, taken from the salt master grain
            def _pillar = salt.getGrain(master, 'I@salt:master', 'domain')
            def domain = _pillar['return'][0].values()[0].values()[0]

            // any one salt:control (KVM) node id — used only to read the
            // salt:control pillar that maps each VM to its hosting hypervisor
            def kvm_pillar = salt.getGrain(master, 'I@salt:control', 'id')
            def kvm01 = kvm_pillar['return'][0].values()[0].values()[0]

            def target_pillar = salt.getGrain(master, "I@ceph:${target}", 'host')
            def minions = target_pillar['return'][0].values()
            for (minion in minions) {
                def minion_name = minion.values()[0]
                // hypervisor that hosts this VM, from the salt:control pillar
                def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
                def minionProvider = provider_pillar['return'][0].values()[0]

                // don't take a node down unless the cluster is healthy (best-effort)
                waitForHealthy(master)
                // shut the VM down only if no backup image exists yet;
                // '[ ! -f ... ] &&' makes both steps idempotent on re-run
                try {
                    salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                } catch (Exception e) {
                    common.warningMsg('Backup already exists')
                }
                try {
                    salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak")
                } catch (Exception e) {
                    common.warningMsg('Backup already exists')
                }
                // start the VM again; warn-only because it may already be running
                try {
                    salt.cmdRun(master, "${minionProvider}", "virsh start ${minion_name}.${domain}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
                // block until the minion answers salt again, then re-check health
                salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
                waitForHealthy(master)
            }
        }
    }
    return
}
98
Jiri Broulikdc87d722017-11-03 15:43:22 +010099def upgrade(master, target) {
100
101 stage("Change ${target} repos") {
102 salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
103 salt.enforceState(master, "I@ceph:${target}", 'linux.system.repo', true)
104 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100105 if (target == 'mgr') {
106 stage('Run ceph mgr state') {
107 salt.enforceState(master, "I@ceph:mgr", "ceph.mgr", true)
108 }
109 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100110 if (target == 'common') {
111 stage('Upgrade ceph-common pkgs') {
Jiri Broulik96c867a2017-11-07 16:14:10 +0100112 runCephCommand(master, "I@ceph:${target}", "apt install ceph-${target} -y")
Jiri Broulikdc87d722017-11-03 15:43:22 +0100113 }
114 } else {
Jiri Broulik96c867a2017-11-07 16:14:10 +0100115 minions = salt.getMinions(master, "I@ceph:${target}")
Jiri Broulikdc87d722017-11-03 15:43:22 +0100116
Jiri Broulik96c867a2017-11-07 16:14:10 +0100117 for (minion in minions) {
118 // upgrade pkgs
119 if (target == 'radosgw') {
120 stage('Upgrade radosgw pkgs') {
121 runCephCommand(master, "I@ceph:${target}", "apt install ${target} -y ")
122 }
123 } else {
124 stage("Upgrade ${target} pkgs on ${minion}") {
125 runCephCommand(master, "${minion}", "apt install ceph-${target} -y")
126 }
127 }
128 // restart services
129 stage("Restart ${target} services on ${minion}") {
130 runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
131 }
132
133 stage("Verify services for ${minion}") {
134 sleep(10)
Mateusz Lose1ae6002019-05-08 11:55:39 +0200135 runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
136 waitForHealthy(master)
Jiri Broulik96c867a2017-11-07 16:14:10 +0100137 }
138
139 stage('Ask for manual confirmation') {
Mateusz Lose1ae6002019-05-08 11:55:39 +0200140 runCephCommand(master, ADMIN_HOST, "ceph -s")
Jiri Broulik96c867a2017-11-07 16:14:10 +0100141 input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
142 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100143 }
144 }
145 runCephCommand(master, ADMIN_HOST, "ceph versions")
146 sleep(5)
147 return
148}
Jakub Josefa63f9862018-01-11 17:58:38 +0100149timeout(time: 12, unit: 'HOURS') {
150 node("python") {
Jiri Broulikdc87d722017-11-03 15:43:22 +0100151
Jakub Josefa63f9862018-01-11 17:58:38 +0100152 // create connection to salt master
153 python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
Jiri Broulikdc87d722017-11-03 15:43:22 +0100154
Alena Kiseleva30f780c2019-01-22 17:09:33 +0300155 stage ('Check user choices') {
156 if (STAGE_UPGRADE_RGW.toBoolean() == true) {
157 // if rgw, check if other stuff has required version
158 def mon_ok = true
159 if (STAGE_UPGRADE_MON.toBoolean() == false) {
160 def mon_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mon versions")['return'][0].values()[0]
161 mon_ok = mon_v.contains("${TARGET_RELEASE}") && !mon_v.contains("${ORIGIN_RELEASE}")
162 }
163 def mgr_ok = true
164 if (STAGE_UPGRADE_MGR.toBoolean() == false) {
165 def mgr_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph mgr versions")['return'][0].values()[0]
166 mgr_ok = mgr_v.contains("${TARGET_RELEASE}") && !mgr_v.contains("${ORIGIN_RELEASE}")
167 }
168 def osd_ok = true
169 if (STAGE_UPGRADE_OSD.toBoolean() == false) {
170 def osd_v = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd versions")['return'][0].values()[0]
171 osd_ok = osd_v.contains("${TARGET_RELEASE}") && !osd_v.contains("${ORIGIN_RELEASE}")
172 }
173 if (!mon_ok || !osd_ok || !mgr_ok) {
174 common.errorMsg('You may choose stages in any order, but RGW should be upgraded last')
175 throw new InterruptedException()
176 }
177 }
178 }
179
Jakub Josefa63f9862018-01-11 17:58:38 +0100180 if (BACKUP_ENABLED.toBoolean() == true) {
181 if (STAGE_UPGRADE_MON.toBoolean() == true) {
182 backup(pepperEnv, 'mon')
183 }
184 if (STAGE_UPGRADE_RGW.toBoolean() == true) {
185 backup(pepperEnv, 'radosgw')
186 }
187 if (STAGE_UPGRADE_OSD.toBoolean() == true) {
188 backup(pepperEnv, 'osd')
Jiri Broulikdc87d722017-11-03 15:43:22 +0100189 }
190 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100191
Jakub Josefa63f9862018-01-11 17:58:38 +0100192 if (flags.size() > 0) {
193 stage('Set cluster flags') {
194 for (flag in flags) {
195 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
Jiri Broulikdc87d722017-11-03 15:43:22 +0100196 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100197 }
198 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100199
Jakub Josefa63f9862018-01-11 17:58:38 +0100200 if (STAGE_UPGRADE_MON.toBoolean() == true) {
201 upgrade(pepperEnv, 'mon')
202 }
203
204 if (STAGE_UPGRADE_MGR.toBoolean() == true) {
205 upgrade(pepperEnv, 'mgr')
206 }
207
208 if (STAGE_UPGRADE_OSD.toBoolean() == true) {
209 upgrade(pepperEnv, 'osd')
210 }
211
212 if (STAGE_UPGRADE_RGW.toBoolean() == true) {
213 upgrade(pepperEnv, 'radosgw')
214 }
215
216 if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
217 upgrade(pepperEnv, 'common')
218 }
219
220 // remove cluster flags
221 if (flags.size() > 0) {
222 stage('Unset cluster flags') {
223 for (flag in flags) {
224 if (!flag.contains('sortbitwise')) {
225 common.infoMsg('Removing flag ' + flag)
226 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
227 }
228
229 }
Jiri Broulik96c867a2017-11-07 16:14:10 +0100230 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100231 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100232
Jakub Josefa63f9862018-01-11 17:58:38 +0100233 if (STAGE_FINALIZE.toBoolean() == true) {
234 stage("Finalize ceph version upgrade") {
235 runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
236 try {
237 runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
238 } catch (Exception e) {
239 common.warningMsg(e)
240 }
241 try {
242 runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
243 } catch (Exception e) {
244 common.warningMsg(e)
245 }
246 }
247 }
248
249 // wait for healthy cluster
250 if (WAIT_FOR_HEALTHY.toBoolean() == true) {
251 waitForHealthy(pepperEnv)
252 }
Jiri Broulikdc87d722017-11-03 15:43:22 +0100253 }
254}