/**
 *
 * Remove Ceph node from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *  HOST                        Host (minion id) to be removed
 *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
 *  ADMIN_HOST                  Host (minion id) with admin keyring
 *  WAIT_FOR_HEALTHY            Wait for the cluster to rebalance before stopping daemons
 *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
 *
 */

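// Illustrative parameter values only (hypothetical deployment, not prescribed
// by this pipeline):
//   HOST              = 'osd04*'
//   HOST_TYPE         = 'osd'
//   ADMIN_HOST        = 'cmn01*'
//   WAIT_FOR_HEALTHY  = 'true'
//   GENERATE_CRUSHMAP = 'false'
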
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
def python = new com.mirantis.mk.Python()

def pepperEnv = "pepperEnv"

def removePartition(master, target, partition_uuid) {
    def partition = ""
    try {
        // partition = /dev/sdi2
        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
    } catch (Exception e) {
        common.warningMsg(e)
    }

    if (partition?.trim()) {
        // dev = /dev/sdi
        def dev = partition.replaceAll('\\d+$', "")
        // part_id = 2
        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
    }
    return
}

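// Usage sketch for removePartition, reusing the illustrative UUID that appears
// later in this pipeline: given a blkid line such as
//   /dev/sdi2: PARTUUID="2c76f144-f412-481e-b150-4046212ca932"
// removePartition(pepperEnv, HOST, '2c76f144-f412-481e-b150-4046212ca932')
// resolves partition = /dev/sdi2, dev = /dev/sdi, part_id = 2 and finally runs
// `parted /dev/sdi rm 2`.
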
// thin wrapper so every remote Ceph/shell command goes through salt.cmdRun
def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

def waitForHealthy(master, count=0, attempts=300) {
    // wait for healthy cluster
    while (count<attempts) {
        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            break
        }
        count++
        sleep(10)
    }
}
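
// Usage note: waitForHealthy(pepperEnv) polls `ceph health` on ADMIN_HOST every
// 10 seconds, up to `attempts` times, and returns as soon as the output contains
// HEALTH_OK; if the limit is reached first, it returns without failing the build.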
timeout(time: 12, unit: 'HOURS') {
    node("python") {

        // create connection to salt master
        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

        matches = ["osd", "mon", "rgw"]
        def found = false
        for (s in matches) {
            if (HOST_TYPE.toLowerCase() == s) {
                found = true
            }
        }

        if (!found) {
            common.errorMsg("Invalid HOST_TYPE. Please use one of the following types: mon/osd/rgw")
            throw new InterruptedException()
        }

        stage('Refresh_pillar') {
            salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
        }

        // split minion id on '.' and remove '*'
        def target = HOST.split("\\.")[0].replace("*", "")

        salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.sync_grains', [], null, true, 5)
        def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
        domain = _pillar['return'][0].values()[0].values()[0]

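        // Example (hypothetical names): HOST = 'osd04*' with a master `domain`
        // grain of 'deploy-name.local' yields target = 'osd04' and the FQDN
        // 'osd04.deploy-name.local' used by the virsh and salt-key steps below.
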
        if (HOST_TYPE.toLowerCase() == 'rgw') {
            // Remove Ceph rgw: re-apply the keepalived and haproxy states on the
            // radosgw nodes so the load balancer configuration is regenerated
            // without the removed host
            stage('Remove Ceph RGW') {
                salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
            }
        }

        if (HOST_TYPE.toLowerCase() != 'osd') {

            // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
            stage('Destroy/Undefine VM') {
                _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
                def kvm01 = _pillar['return'][0].values()[0].values()[0]

                _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
                def targetProvider = _pillar['return'][0].values()[0]

                salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
                salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
            }
        } else if (HOST_TYPE.toLowerCase() == 'osd') {
            def osd_ids = []

            // get list of osd disks of the host
            salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']

            for (i in ceph_disks) {
                def osd_id = i.getKey().toString()
                osd_ids.add('osd.' + osd_id)
                print("Will delete " + osd_id)
            }

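            // Example: a ceph_disk grain keyed by OSD ids 0 and 3 (illustrative)
            // yields osd_ids = ['osd.0', 'osd.3'], so the stage below runs
            // `ceph osd out osd.0 osd.3`.
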
            // `ceph osd out <id> <id>`
            stage('Set OSDs out') {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
            }

            // wait for healthy cluster
            if (WAIT_FOR_HEALTHY.toBoolean() == true) {
                sleep(5)
                waitForHealthy(pepperEnv)
            }

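            // After `ceph osd out`, Ceph backfills the affected placement groups
            // onto the remaining OSDs; waiting for HEALTH_OK ensures the data is
            // fully re-replicated before the daemons are stopped below.
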
            // stop osd daemons
            stage('Stop OSD daemons') {
                for (i in osd_ids) {
                    salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
                }
            }

            // `ceph osd crush remove osd.2`
            stage('Remove OSDs from CRUSH') {
                for (i in osd_ids) {
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
                }
            }

            // remove keyring `ceph auth del osd.3`
            stage('Remove OSD keyrings from auth') {
                for (i in osd_ids) {
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
                }
            }

            // remove osd `ceph osd rm osd.3`
            stage('Remove OSDs') {
                for (i in osd_ids) {
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
                }
            }

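            // Per-OSD device cleanup: detect whether each OSD is dmcrypt-backed,
            // wipe the partition table of its data disk if so, and remove any
            // journal / block.db / block.wal partition living on a separate device.
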
            for (osd_id in osd_ids) {

                id = osd_id.replaceAll('osd.', '')
                def dmcrypt = ""
                try {
                    // a dmcrypt OSD keeps dmcrypt key material under its data dir
                    dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
                } catch (Exception e) {
                    common.warningMsg(e)
                }

                if (dmcrypt?.trim()) {
                    mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
                    dev = mount.split()[0].replaceAll("[0-9]","")

                    // remove partition tables
                    stage("dd part table on ${dev}") {
                        runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
                    }

                }
                // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
                stage('Remove journal / block_db / block_wal partition') {
                    def partition_uuid = ""
                    def journal_partition_uuid = ""
                    def block_db_partition_uuid = ""
                    def block_wal_partition_uuid = ""
                    // each symlink resolves to /dev/disk/by-partuuid/<uuid>; keep the basename
                    try {
                        journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
                        journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
                    } catch (Exception e) {
                        common.infoMsg(e)
                    }
                    try {
                        block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
                        block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
                    } catch (Exception e) {
                        common.infoMsg(e)
                    }

                    try {
                        block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
                        block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
                    } catch (Exception e) {
                        common.infoMsg(e)
                    }

                    // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                    if (journal_partition_uuid?.trim()) {
                        partition_uuid = journal_partition_uuid
                    } else if (block_db_partition_uuid?.trim()) {
                        partition_uuid = block_db_partition_uuid
                    }

                    // if disk has journal, block_db or block_wal on different disk, then remove the partition
                    if (partition_uuid?.trim()) {
                        removePartition(pepperEnv, HOST, partition_uuid)
                    }
                    if (block_wal_partition_uuid?.trim()) {
                        removePartition(pepperEnv, HOST, block_wal_partition_uuid)
                    }
                }
            }

            // purge Ceph pkgs
            stage('Purge Ceph OSD pkgs') {
                runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
            }

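            // With its OSDs removed from CRUSH, the host bucket (looked up via
            // `hostname -s` below) should be empty and can be dropped from the
            // CRUSH tree; failures are downgraded to warnings in case the bucket
            // is already gone.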
            stage('Remove OSD host from crushmap') {
                def hostname = runCephCommand(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
                try {
                    runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
            }

            // stop salt-minion service and move its configuration
            stage('Stop salt-minion') {
                salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], null, true, 5)
            }
        }

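        // Delete the minion key from the Salt master and drop the generated
        // reclass node definition so the removed host disappears from the model.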
        stage('Remove salt-key') {
            try {
                salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
            } catch (Exception e) {
                common.warningMsg(e)
            }
            try {
                salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
            } catch (Exception e) {
                common.warningMsg(e)
            }
        }

        stage('Remove keyring') {
            def keyring = ""
            def keyring_lines = ""
            try {
                keyring_lines = runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
            } catch (Exception e) {
                common.warningMsg(e)
            }
            // pick the first auth entry whose name contains the target hostname
            for (line in keyring_lines) {
                if (line.toLowerCase().contains(target.toLowerCase())) {
                    keyring = line
                    break
                }
            }
            if (keyring?.trim()) {
                runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
            }
        }

        if (HOST_TYPE.toLowerCase() == 'mon') {
            // Update Monmap
            stage('Update monmap') {
                // back up the current monmap, then remove the target mon from both
                // the live cluster and the saved map
                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o /tmp/monmap")
                try {
                    runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
                } catch (Exception e) {
                    common.warningMsg(e)
                }
                runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
            }

            def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
            print target_hosts

            // Update configs
            stage('Update Ceph configs') {
                for (tgt in target_hosts) {
                    salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
                }
            }
        }

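        // Re-applying ceph.setup.crush regenerates the CRUSH map from the cluster
        // model, presumably leaving out the removed OSD host (see GENERATE_CRUSHMAP
        // above).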
        if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
            stage('Generate CRUSHMAP') {
                salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
            }
        }
    }
}