blob: ec232b81501065ebcaa05d3c7ecac2c74bef30e1 [file] [log] [blame]
Jiri Broulikdc87d722017-11-03 15:43:22 +01001/**
2 *
3 * Upgrade Ceph mon/mgr/osd/rgw/client
4 *
 * Required parameters:
6 * SALT_MASTER_URL URL of Salt master
7 * SALT_MASTER_CREDENTIALS Credentials to the Salt API
8 *
9 * ADMIN_HOST Host (minion id) with admin keyring and /etc/crushmap file present
10 * CLUSTER_FLAGS Comma separated list of tags to apply to cluster
 *  WAIT_FOR_HEALTHY               Wait for cluster rebalance before stopping daemons
12 * ORIGIN_RELEASE Ceph release version before upgrade
13 * TARGET_RELEASE Ceph release version after upgrade
14 * STAGE_UPGRADE_MON Set to True if Ceph mon nodes upgrade is desired
15 * STAGE_UPGRADE_MGR Set to True if Ceph mgr nodes upgrade or new deploy is desired
16 * STAGE_UPGRADE_OSD Set to True if Ceph osd nodes upgrade is desired
17 * STAGE_UPGRADE_RGW Set to True if Ceph rgw nodes upgrade is desired
18 * STAGE_UPGRADE_CLIENT Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
19 *
20 */
21
// Shared Mirantis pipeline-library helpers (logging, Salt API, virtualenv)
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()

// Name of the pepper (Salt API) virtualenv created on the build node
def pepperEnv = "pepperEnv"
// Cluster flags (e.g. noout,norebalance) to set before and unset after the
// upgrade; tokenize() drops empty entries so an empty CLUSTER_FLAGS yields []
def flags = CLUSTER_FLAGS.tokenize(',')
28
/**
 * Run an arbitrary shell command on a Salt target and return the raw
 * salt.cmdRun result structure.
 *
 * @param master  pepper/Salt connection venv name
 * @param target  Salt minion target expression
 * @param cmd     shell command to execute
 */
def runCephCommand(master, target, cmd) {
    def result = salt.cmdRun(master, target, cmd)
    return result
}
32
/**
 * Poll `ceph health` on ADMIN_HOST every 10 seconds until the cluster
 * reports HEALTH_OK or `attempts` polls have been made.
 *
 * Fix: previously the function returned silently when the attempt budget was
 * exhausted, so the pipeline carried on against an unhealthy cluster with no
 * operator-visible signal. It now logs an explicit warning in that case.
 * The interface and the "proceed anyway" behavior are unchanged.
 *
 * @param master    pepper/Salt connection venv name
 * @param count     starting attempt counter (default 0)
 * @param attempts  maximum number of polls (default 300, i.e. ~50 minutes)
 */
def waitForHealthy(master, count=0, attempts=300) {
    // wait for healthy cluster
    while (count < attempts) {
        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            return
        }
        count++
        sleep(10)
    }
    // Attempt budget exhausted without reaching HEALTH_OK; warn loudly but
    // keep the original best-effort semantics (callers continue regardless).
    common.warningMsg("Ceph cluster did not reach HEALTH_OK after ${attempts} attempts, proceeding anyway")
}
45
/**
 * Back up nodes of the given Ceph role before they are upgraded.
 *
 * For 'osd' the ceph.backup Salt state is enforced and the backup runner
 * script is executed in place on every OSD node. For any other role (mon,
 * radosgw) the nodes are assumed to be KVM virtual machines: each VM is
 * stopped, its qcow2 system disk is copied aside on its hypervisor, and the
 * VM is started again — one VM at a time, waiting for cluster health in
 * between so quorum/service availability is preserved.
 *
 * @param master  pepper/Salt connection venv name
 * @param target  Ceph role to back up ('osd', 'mon', 'radosgw', ...)
 */
def backup(master, target) {
    stage("backup ${target}") {

        if (target == 'osd') {
            // In-place backup on every OSD node via the ceph.backup state.
            try {
                salt.enforceState(master, "I@ceph:${target}", "ceph.backup", true)
                runCephCommand(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
            } catch (Exception e) {
                common.errorMsg(e)
                common.errorMsg("Make sure Ceph backup on OSD nodes is enabled")
                // Abort the whole pipeline: upgrading without a backup is unsafe.
                throw new InterruptedException()
            }
        } else {
            // Cluster domain name, read from the Salt master's grains.
            def _pillar = salt.getGrain(master, 'I@salt:master', 'domain')
            def domain = _pillar['return'][0].values()[0].values()[0]

            // Any salt:control (KVM) node id; its pillar maps VMs to hypervisors.
            def kvm_pillar = salt.getGrain(master, 'I@salt:control', 'id')
            def kvm01 = kvm_pillar['return'][0].values()[0].values()[0]

            // Host grains of every minion of the target role.
            def target_pillar = salt.getGrain(master, "I@ceph:${target}", 'host')
            def minions = target_pillar['return'][0].values()
            for (minion in minions) {
                def minion_name = minion.values()[0]
                // Hypervisor that owns this VM, from the salt:control pillar.
                def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
                def minionProvider = provider_pillar['return'][0].values()[0]

                // Only take the node down when the cluster is healthy.
                waitForHealthy(master)
                try {
                    // Stop the VM only if no .bak image exists yet (the guard
                    // makes the whole sequence idempotent across re-runs).
                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                } catch (Exception e) {
                    common.warningMsg('Backup already exists')
                }
                try {
                    // Copy the system disk aside while the VM is down.
                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
                } catch (Exception e) {
                    common.warningMsg('Backup already exists')
                }
                try {
                    // Start the VM again; fails harmlessly if it never stopped.
                    salt.cmdRun(master, "${minionProvider}", "virsh start ${minion_name}.${domain}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
                // Block until the minion answers Salt again, then until the
                // cluster has recovered, before touching the next node.
                salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
                waitForHealthy(master)
            }
        }
    }
    return
}
95
/**
 * Upgrade Ceph packages for the given role and restart its services, one
 * minion at a time with a manual confirmation gate between nodes (rolling
 * upgrade). The 'common' role (clients) is upgraded everywhere at once and
 * has no service restart.
 *
 * Fix: the radosgw branch previously ran the package install against
 * "I@ceph:${target}" (every rgw node) from inside the per-minion loop, so
 * all rgw nodes were upgraded on each iteration — defeating the rolling,
 * one-node-at-a-time upgrade that the restart/verify/confirm steps assume.
 * It now targets only the current ${minion}, consistent with the generic
 * branch below.
 *
 * @param master  pepper/Salt connection venv name
 * @param target  Ceph role ('mon', 'mgr', 'osd', 'radosgw', 'common')
 */
def upgrade(master, target) {

    stage("Change ${target} repos") {
        // Refresh pillar and switch APT repos to the target release first.
        salt.runSaltProcessStep(master, "I@ceph:${target}", 'saltutil.refresh_pillar', [], null, true, 5)
        salt.enforceState(master, "I@ceph:${target}", 'linux.system.repo', true)
    }
    if (target == 'mgr') {
        stage('Run ceph mgr state') {
            // mgr may be a fresh deployment (new daemon in Luminous), so the
            // full ceph.mgr state is applied rather than a bare pkg upgrade.
            salt.enforceState(master, "I@ceph:mgr", "ceph.mgr", true)
        }
    }
    if (target == 'common') {
        stage('Upgrade ceph-common pkgs') {
            // Client-only nodes: no daemons to restart, upgrade all at once.
            runCephCommand(master, "I@ceph:${target}", "apt install ceph-${target} -y")
        }
    } else {
        minions = salt.getMinions(master, "I@ceph:${target}")

        for (minion in minions) {
            // upgrade pkgs on this one node only
            if (target == 'radosgw') {
                stage("Upgrade radosgw pkgs on ${minion}") {
                    // The rgw package is named 'radosgw', not 'ceph-radosgw'.
                    // Target the current minion only (was I@ceph:${target}).
                    runCephCommand(master, "${minion}", "apt install ${target} -y")
                }
            } else {
                stage("Upgrade ${target} pkgs on ${minion}") {
                    runCephCommand(master, "${minion}", "apt install ceph-${target} -y")
                }
            }
            // restart this node's daemons on the new binaries
            stage("Restart ${target} services on ${minion}") {
                runCephCommand(master, "${minion}", "systemctl restart ceph-${target}.target")
            }

            stage("Verify services for ${minion}") {
                sleep(10)
                runCephCommand(master, ADMIN_HOST, "ceph -s")
            }

            // Human gate: do not roll on to the next node until the operator
            // confirms this one rejoined the cluster correctly.
            stage('Ask for manual confirmation') {
                input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
            }
        }
    }
    // Show the per-daemon version spread so mixed versions are visible.
    runCephCommand(master, ADMIN_HOST, "ceph versions")
    sleep(5)
    return
}
// Whole pipeline is capped at 12 hours: rolling upgrades with a manual
// confirmation gate per node can legitimately run for a long time.
timeout(time: 12, unit: 'HOURS') {
    node("python") {

        // create connection to salt master
        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

        // Optionally back up each selected role before touching it
        // (VM qcow2 snapshots for mon/rgw, ceph.backup state for osd).
        if (BACKUP_ENABLED.toBoolean() == true) {
            if (STAGE_UPGRADE_MON.toBoolean() == true) {
                backup(pepperEnv, 'mon')
            }
            if (STAGE_UPGRADE_RGW.toBoolean() == true) {
                backup(pepperEnv, 'radosgw')
            }
            if (STAGE_UPGRADE_OSD.toBoolean() == true) {
                backup(pepperEnv, 'osd')
            }
        }

        // Set operator-supplied cluster flags (e.g. noout) for the duration
        // of the upgrade.
        if (flags.size() > 0) {
            stage('Set cluster flags') {
                for (flag in flags) {
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                }
            }
        }

        // Upgrade roles in order: mon -> mgr -> osd -> rgw -> clients.
        if (STAGE_UPGRADE_MON.toBoolean() == true) {
            upgrade(pepperEnv, 'mon')
        }

        if (STAGE_UPGRADE_MGR.toBoolean() == true) {
            upgrade(pepperEnv, 'mgr')
        }

        if (STAGE_UPGRADE_OSD.toBoolean() == true) {
            upgrade(pepperEnv, 'osd')
        }

        if (STAGE_UPGRADE_RGW.toBoolean() == true) {
            upgrade(pepperEnv, 'radosgw')
        }

        if (STAGE_UPGRADE_CLIENT.toBoolean() == true) {
            upgrade(pepperEnv, 'common')
        }

        // remove cluster flags
        if (flags.size() > 0) {
            stage('Unset cluster flags') {
                for (flag in flags) {
                    // sortbitwise is deliberately never unset here.
                    if (!flag.contains('sortbitwise')) {
                        common.infoMsg('Removing flag ' + flag)
                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                    }

                }
            }
        }

        if (STAGE_FINALIZE.toBoolean() == true) {
            stage("Finalize ceph version upgrade") {
                // Require all OSDs to run at least the target release.
                runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd require-osd-release ${TARGET_RELEASE}")
                // Best-effort: keep compatibility with clients still on the
                // origin release (command not available on all releases).
                try {
                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd set-require-min-compat-client ${ORIGIN_RELEASE}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
                // Best-effort: switch CRUSH tunables to the optimal profile;
                // NOTE(review): this can trigger significant data movement.
                try {
                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush tunables optimal")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
            }
        }

        // wait for healthy cluster
        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
            waitForHealthy(pepperEnv)
        }
    }
}