package com.mirantis.mk

/**
 * Install and configure ceph clients
 *
 * @param master Salt connection object
 * @param extra_tgt Extra targets for compound
 */
def installClient(master, extra_tgt='') {
    def salt = new Salt()

    // install Ceph Radosgw
    installRgw(master, "I@ceph:radosgw", extra_tgt)

    // setup keyring for Openstack services
    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@glance:server $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@cinder:controller $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@nova:compute $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
    salt.enforceStateWithTest([saltId: master, target: "I@ceph:common and I@gnocchi:server $extra_tgt", state: ['ceph.common', 'ceph.setup.keyring']])
}

/**
 * Install and configure ceph monitor on target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param extra_tgt Extra targets for compound
 */
def installMon(master, target="I@ceph:mon", extra_tgt='') {
    def salt = new Salt()

    salt.enforceState([saltId: master, target: "$target $extra_tgt", state: 'salt.minion.grains'])

    // TODO: can we re-add cmn01 with proper keyrings?
    // generate keyrings
    if(salt.testTarget(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt")) {
        salt.enforceState([saltId: master, target: "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt", state: 'ceph.mon'])
        salt.runSaltProcessStep(master, "I@ceph:mon $extra_tgt", 'saltutil.sync_grains')
        salt.runSaltProcessStep(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt", 'mine.update')

        // on target nodes mine is used to get pillar from 'ceph:common:keyring:admin' via grain.items
        // we need to refresh all pillar/grains to make data sharing work correctly
        salt.fullRefresh(master, "( I@ceph:mon:keyring:mon or I@ceph:common:keyring:admin ) $extra_tgt")

        sleep(5)
    }
    // install Ceph Mons
    salt.enforceState([saltId: master, target: "I@ceph:mon $extra_tgt", state: 'ceph.mon'])
    salt.enforceStateWithTest([saltId: master, target: "I@ceph:mgr $extra_tgt", state: 'ceph.mgr'])

    // update config
    salt.enforceState([saltId: master, target: "I@ceph:common $extra_tgt", state: 'ceph.common'])
}

/**
 * Install and configure osd daemons on target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param setup Run the ceph.setup state (pools, keyrings, crush) when an I@ceph:setup node is present (optional, default true)
 * @param extra_tgt Extra targets for compound
 */
def installOsd(master, target="I@ceph:osd", setup=true, extra_tgt='') {
    def salt = new Salt()
    def orchestrate = new Orchestrate()

    // install Ceph OSDs
    salt.enforceState([saltId: master, target: target, state: ['linux.storage','ceph.osd']])
    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'saltutil.sync_grains')
    salt.enforceState([saltId: master, target: target, state: 'ceph.osd.custom'])
    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'saltutil.sync_grains')
    salt.runSaltProcessStep(master, "I@ceph:osd $extra_tgt", 'mine.update')

    // setup pools, keyrings and maybe crush
    if(salt.testTarget(master, "I@ceph:setup $extra_tgt") && setup) {
        orchestrate.installBackup(master, 'ceph')
        salt.enforceState([saltId: master, target: "I@ceph:setup $extra_tgt", state: 'ceph.setup'])
    }
}
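
// Usage sketch (assumption, not part of the original library): a typical
// greenfield deployment calls these helpers in order from a pipeline that
// already holds a Salt connection object, named `saltMaster` here for
// illustration only.
//
//     installMon(saltMaster)                 // monitors and managers first
//     installOsd(saltMaster, "I@ceph:osd")   // then OSDs, pools and keyrings
//     installClient(saltMaster)              // radosgw + keyrings for OpenStack services
//     connectOS(saltMaster)                  // finally wire up glance/cinder/nova/gnocchi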

/**
 * Install and configure rgw service on target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param extra_tgt Extra targets for compound
 */
def installRgw(master, target="I@ceph:radosgw", extra_tgt='') {
    def salt = new Salt()

    if(salt.testTarget(master, "I@ceph:radosgw $extra_tgt")) {
        salt.fullRefresh(master, "I@ceph:radosgw $extra_tgt")
        salt.enforceState([saltId: master, target: "I@ceph:radosgw $extra_tgt", state: ['keepalived', 'haproxy', 'ceph.radosgw']])
    }
}

/**
 * Remove rgw daemons from target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param extra_tgt Extra targets for compound
 */
def removeRgw(master, target, extra_tgt='') {
    def salt = new Salt()

    // TODO needs to be reviewed
    salt.fullRefresh(master, "I@ceph:radosgw $extra_tgt")
    salt.enforceState([saltId: master, target: "I@ceph:radosgw $extra_tgt", state: ['keepalived', 'haproxy', 'ceph.radosgw']])
}

/**
 * Remove osd daemons from target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param osds List of OSD ids to remove
 * @param flags Ceph osd flags expected to be set during the operation (ignored by the health check)
 * @param safeRemove Wait for data rebalance before removing the drive
 * @param wipeDisks Wipe data and block partitions completely
 */
def removeOsd(master, target, osds, flags, safeRemove=true, wipeDisks=false) {
    def common = new Common()
    def salt = new Salt()

    // systemctl stop ceph-osd@0 && ceph osd purge 0 --yes-i-really-mean-it && umount /dev/vdc1; test -b /dev/vdc1 && dd if=/dev/zero of=/dev/vdc1 bs=1M; test -b /dev/vdc2 && dd if=/dev/zero of=/dev/vdc2 bs=1M count=100; sgdisk -d1 -d2 /dev/vdc; partprobe
    if(osds.isEmpty()) {
        common.warningMsg('List of OSDs was empty. No OSD will be removed from the cluster')
        return
    }

    // `ceph osd out <id> <id>`
    cmdRun(master, 'ceph osd out ' + osds.join(' '), true, true)

    if(safeRemove) {
        waitForHealthy(master, flags)
    }

    for(osd in osds) {
        salt.runSaltProcessStep(master, target, 'service.stop', "ceph-osd@$osd", null, true)
        cmdRun(master, "ceph osd purge $osd --yes-i-really-mean-it", true, true)
    }

    for(osd in osds) {
        def lvm_enabled = getPillar(master, target, "ceph:osd:lvm_enabled")
        if(lvm_enabled) {
            // ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --destroy
            def output = cmdRunOnTarget(master, target, "ceph-volume lvm zap --osd-id $osd --destroy >/dev/null && echo 'zaped'", false)
            if(output == 'zaped') { continue }
        }

        common.infoMsg("Removing legacy osd.")
        def journal_partition = ""
        def block_db_partition = ""
        def block_wal_partition = ""
        def block_partition = ""
        def data_partition = ""
        def dataDir = "/var/lib/ceph/osd/ceph-$osd"
        journal_partition = cmdRunOnTarget(master, target,
            "test -f $dataDir/journal_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/journal_uuid`", false)
        block_db_partition = cmdRunOnTarget(master, target,
            "test -f $dataDir/block.db_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block.db_uuid`", false)
        block_wal_partition = cmdRunOnTarget(master, target,
            "test -f $dataDir/block.wal_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block.wal_uuid`", false)
        block_partition = cmdRunOnTarget(master, target,
            "test -f $dataDir/block_uuid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/block_uuid`", false)
        data_partition = cmdRunOnTarget(master, target,
            "test -f $dataDir/fsid && readlink -f /dev/disk/by-partuuid/`cat $dataDir/fsid`", false)

        try {
            if(journal_partition.trim()) { removePartition(master, target, journal_partition) }
            if(block_db_partition.trim()) { removePartition(master, target, block_db_partition) }
            if(block_wal_partition.trim()) { removePartition(master, target, block_wal_partition) }
            if(block_partition.trim()) { removePartition(master, target, block_partition, 'block', wipeDisks) }
            if(data_partition.trim()) { removePartition(master, target, data_partition, 'data', wipeDisks) }
            else { common.warningMsg("Can't find data partition for osd.$osd") }
        }
        catch(Exception e) {
            // report but continue, as a problem on one osd can be sorted out afterwards
            common.errorMsg("Found some issue during cleaning partition for osd.$osd on $target")
            common.errorMsg(e)
            currentBuild.result = 'FAILURE'
        }

        cmdRunOnTarget(master, target, "partprobe", false)
    }
}
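
// Usage sketch (assumption, names such as `saltMaster` and 'osd001*' are
// illustrative): removing OSDs 3 and 5 from a storage node while keeping
// rebalance traffic under control with osd flags.
//
//     def flags = ['noout']
//     setFlags(saltMaster, flags)
//     removeOsd(saltMaster, 'osd001*', ['3', '5'], flags, true, false)
//     unsetFlags(saltMaster, flags)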

/**
 * Update monitoring for target hosts
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param extra_tgt Extra targets for compound
 */
def updateMonitoring(master, target="I@ceph:common", extra_tgt='') {
    def common = new Common()
    def salt = new Salt()

    def prometheusNodes = salt.getMinions(master, "I@prometheus:server $extra_tgt")
    if(!prometheusNodes.isEmpty()) {
        //Collect Grains
        salt.enforceState([saltId: master, target: "$target $extra_tgt", state: 'salt.minion.grains'])
        salt.runSaltProcessStep(master, "$target $extra_tgt", 'saltutil.refresh_modules')
        salt.runSaltProcessStep(master, "$target $extra_tgt", 'mine.update')
        sleep(5)
        salt.enforceState([saltId: master, target: "$target $extra_tgt", state: ['fluentd', 'telegraf', 'prometheus']])
        salt.enforceState([saltId: master, target: "I@prometheus:server $extra_tgt", state: 'prometheus'])
    }
    else {
        common.infoMsg('No Prometheus nodes in cluster. Nothing to do.')
    }
}
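
// Usage sketch (assumption): refresh monitoring after new ceph nodes are
// added, optionally limiting the run to the newly deployed hosts.
//
//     updateMonitoring(saltMaster)                // all I@ceph:common nodes
//     updateMonitoring(saltMaster, "I@ceph:osd")  // only the storage nodes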

def connectCeph(master, extra_tgt='') {
    new Common().infoMsg("This method was renamed. Use method connectOS instead.")
    connectOS(master, extra_tgt)
}

/**
 * Enforce configuration and connect OpenStack clients
 *
 * @param master Salt connection object
 * @param extra_tgt Extra targets for compound
 */
def connectOS(master, extra_tgt='') {
    def salt = new Salt()

    // setup Keystone service and endpoints for swift or / and S3
    salt.enforceStateWithTest([saltId: master, target: "I@keystone:client $extra_tgt", state: 'keystone.client'])

    // connect Ceph to the env
    if(salt.testTarget(master, "I@ceph:common and I@glance:server $extra_tgt")) {
        salt.enforceState([saltId: master, target: "I@ceph:common and I@glance:server $extra_tgt", state: ['glance']])
        salt.runSaltProcessStep(master, "I@ceph:common and I@glance:server $extra_tgt", 'service.restart', ['glance-api'])
    }
    if(salt.testTarget(master, "I@ceph:common and I@cinder:controller $extra_tgt")) {
        salt.enforceState([saltId: master, target: "I@ceph:common and I@cinder:controller $extra_tgt", state: ['cinder']])
        salt.runSaltProcessStep(master, "I@ceph:common and I@cinder:controller $extra_tgt", 'service.restart', ['cinder-volume'])
    }
    if(salt.testTarget(master, "I@ceph:common and I@nova:compute $extra_tgt")) {
        salt.enforceState([saltId: master, target: "I@ceph:common and I@nova:compute $extra_tgt", state: ['nova']])
        salt.runSaltProcessStep(master, "I@ceph:common and I@nova:compute $extra_tgt", 'service.restart', ['nova-compute'])
    }
    if(salt.testTarget(master, "I@ceph:common and I@gnocchi:server $extra_tgt")) {
        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server:role:primary $extra_tgt", state: 'gnocchi.server'])
        salt.enforceState([saltId: master, target: "I@ceph:common and I@gnocchi:server $extra_tgt", state: 'gnocchi.server'])
    }
}
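
// Usage sketch (assumption): connectOS() only re-enforces the OpenStack
// states and restarts the affected services, so it can be re-run on its own,
// e.g. after client keyrings have been regenerated by installClient().
//
//     connectOS(saltMaster)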

/**
 * Remove vm from VCP
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 */
def removeVm(master, target) {
    def common = new Common()
    def salt = new Salt()

    def fqdn = getGrain(master, target, 'id')
    def hostname = salt.stripDomainName(fqdn)
    def hypervisor = getPillar(master, "I@salt:control", "salt:control:cluster:internal:node:$hostname:provider")

    removeSalt(master, target)

    if(hypervisor?.trim()) {
        cmdRunOnTarget(master, hypervisor, "virsh destroy $fqdn")
        cmdRunOnTarget(master, hypervisor, "virsh undefine $fqdn")
    }
    else {
        common.errorMsg("There is no provider in pillar for $hostname")
    }
}

/**
 * Stop target salt minion, remove its key on master and definition in reclass
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 */
def removeSalt(master, target) {
    def common = new Common()

    def fqdn = getGrain(master, target, 'id')
    try {
        cmdRunOnTarget(master, 'I@salt:master', "salt-key --include-accepted -r $fqdn -y")
    }
    catch(Exception e) {
        common.warningMsg(e)
    }
}

/**
 * Delete ceph auth entries (keyrings) matching the target host name
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param extra_tgt Extra targets for compound
 */
def deleteKeyrings(master, target, extra_tgt='') {
    def host = getGrain(master, target, 'host')
    def keys = cmdRun(master, "ceph auth list 2>/dev/null | grep $host", false).tokenize('\n')
    if(keys.isEmpty()) {
        new Common().warningMsg("Nothing to do. There is no keyring for $host")
    }
    for(key in keys) {
        cmdRun(master, "ceph auth del $key")
    }
}
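
// Usage sketch (assumption, 'rgw03*' is an illustrative target): a full
// decommission of a ceph VM drops its cephx entries first, then removes the
// minion key and the VM itself.
//
//     deleteKeyrings(saltMaster, 'rgw03*')
//     removeVm(saltMaster, 'rgw03*')   // also calls removeSalt() internally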

/**
 * Generate `ceph osd pg-upmap-items` commands which pin remapped PGs back to
 * the OSDs they are currently acting on
 *
 * @param pgmap List of PG entries (maps with 'pgid', 'up' and 'acting' keys)
 * @param map List the generated commands are appended to
 */
def generateMapping(pgmap, map) {
    def pg_new
    def pg_old
    for(pg in pgmap) {
        pg_new = pg["up"].minus(pg["acting"])
        pg_old = pg["acting"].minus(pg["up"])
        for(int i = 0; i < pg_new.size(); i++) {
            // def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
            def string = "ceph osd pg-upmap-items ${pg["pgid"]} ${pg_new[i]} ${pg_old[i]}"
            map.add(string)
        }
    }
}
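
// Usage sketch (assumption): freeze PG placement before maintenance by
// mapping remapped PGs back to their acting OSDs. The exact JSON shape of
// `ceph pg dump` (flat list vs. a 'pg_stats' key) depends on the Ceph release,
// so adjust the extraction accordingly.
//
//     def map = []
//     def pgmap = new Common().parseJSON(cmdRun(saltMaster, 'ceph pg dump pgs_brief -f json', false))
//     generateMapping(pgmap, map)                  // or pgmap['pg_stats'] on newer releases
//     for(cmd in map) { cmdRun(saltMaster, cmd) }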

/**
 * Run command on the first of available ceph monitors
 *
 * @param master Salt connection object
 * @param cmd Command to run
 * @param checkResponse Check response of command. (optional, default true)
 * @param output Print output (optional, default false)
 */
def cmdRun(master, cmd, checkResponse=true, output=false) {
    def salt = new Salt()
    def cmn01 = salt.getFirstMinion(master, "I@ceph:mon")
    return salt.cmdRun(master, cmn01, cmd, checkResponse, null, output)['return'][0][cmn01]
}

/**
 * Run command on target host
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param cmd Command to run
 * @param checkResponse Check response of command. (optional, default true)
 * @param output Print output (optional, default false)
 */
def cmdRunOnTarget(master, target, cmd, checkResponse=true, output=false) {
    def salt = new Salt()
    return salt.cmdRun(master, target, cmd, checkResponse, null, output)['return'][0].values()[0]
}
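
// Usage sketch (assumption, names are illustrative): cmdRun() always runs on
// the first ceph monitor, while cmdRunOnTarget() runs on an explicit compound
// target and returns the first matching minion's output.
//
//     def status = cmdRun(saltMaster, 'ceph -s', false)
//     def lsblk  = cmdRunOnTarget(saltMaster, 'osd001*', 'lsblk -rp', false)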

/**
 * Get a pillar value from the first host matching the target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param pillar Pillar to obtain
 */
def getPillar(master, target, pillar) {
    def common = new Common()
    def salt = new Salt()
    try {
        return salt.getPillar(master, target, pillar)['return'][0].values()[0]
    }
    catch(Exception e) {
        common.warningMsg('There was no pillar for the target.')
    }
}

/**
 * Get a grain value from the first host matching the target
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param grain Grain to obtain
 */
def getGrain(master, target, grain) {
    def common = new Common()
    def salt = new Salt()
    try {
        return salt.getGrain(master, target, grain)['return'][0].values()[0].values()[0]
    }
    catch(Exception e) {
        common.warningMsg('There was no grain for the target.')
    }
}
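
// Usage sketch (assumption): both helpers return the value for the first
// matching minion, or null (with a warning) when nothing matches.
//
//     def lvmEnabled = getPillar(saltMaster, 'osd001*', 'ceph:osd:lvm_enabled')
//     def fqdn       = getGrain(saltMaster, 'osd001*', 'id')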

/**
 * Set flags
 *
 * @param master Salt connection object
 * @param flags Collection of flags to set
 */
def setFlags(master, flags) {
    if(flags instanceof String) { flags = [flags] }
    for(flag in flags) {
        cmdRun(master, 'ceph osd set ' + flag)
    }
}

/**
 * Unset flags
 *
 * @param master Salt connection object
 * @param flags Collection of flags to unset (optional)
 */
def unsetFlags(master, flags=[]) {
    if(flags instanceof String) { flags = [flags] }
    for(flag in flags) {
        cmdRun(master, 'ceph osd unset ' + flag)
    }
}

/**
 * Wait for healthy cluster while ignoring flags which have been set
 *
 * @param master Salt connection object
 * @param flags Collection of osd flags which are expected to be set and therefore ignored
 * @param attempts Attempts before it pauses execution (optional, default 300)
 */
def waitForHealthy(master, flags, attempts=300) {
    def common = new Common()

    def count = 0
    def health = ''

    // warnings that can appear during the operation but are unrelated to data safety
    def acceptableWarnings = [
        'AUTH_INSECURE_GLOBAL_ID_RECLAIM',
        'AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED',
        'MON_MSGR2_NOT_ENABLED'
    ]
    // wait for current ops to be reflected in the status
    sleep(5)

    while(count++ < attempts) {
        health = cmdRun(master, 'ceph health -f json', false)
        health = common.parseJSON(health)

        if(health['status'] == 'HEALTH_OK') { return }
        if(health['checks'].containsKey('OSDMAP_FLAGS')) {
            def unexpectedFlags = health['checks']['OSDMAP_FLAGS']['summary']['message'].tokenize(' ').getAt(0)?.tokenize(',')
            unexpectedFlags.removeAll(flags)
            if(unexpectedFlags.isEmpty()) {
                health['checks'].remove('OSDMAP_FLAGS')
            }
        }

        // ignore acceptable warnings
        for(w in acceptableWarnings) {
            if(health['checks'].containsKey(w)) {
                health['checks'].remove(w)
            }
        }

        if(health['checks'].isEmpty()) { return }

        common.warningMsg("Ceph cluster is still unhealthy: " + health['status'])
        for(check in health['checks']) {
            common.warningMsg(check.value['summary']['message'])
        }
        sleep(10)
    }
    // TODO: MissingMethodException
    input message: "After ${count} attempts cluster is still unhealthy."
    //throw new RuntimeException("After ${count} attempts cluster is still unhealthy. Can't proceed")
}
def waitForHealthy(master, String host, flags, attempts=300) {
    new Common().warningMsg('This method will be deprecated.')
    waitForHealthy(master, flags, attempts)
}
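
// Usage sketch (assumption): the usual maintenance pattern is to set flags,
// do the disruptive work, wait for HEALTH_OK while ignoring exactly those
// flags, then remove them again.
//
//     def flags = ['noout', 'norebalance']
//     setFlags(saltMaster, flags)
//     // ... restart or replace OSDs here ...
//     waitForHealthy(saltMaster, flags)
//     unsetFlags(saltMaster, flags)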

/**
 * Remove unused orphan partitions left over after removed osds
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param wipePartitions Wipe each found partition completely (optional, default false)
 */
def removeOrphans(master, target, wipePartitions=false) {
    def common = new Common()
    def salt = new Salt()

    def orphans = []
    // TODO: ceph-disk is available only in luminous
    def disks = cmdRunOnTarget(master, target, "ceph-disk list --format json 2>/dev/null", false)
    disks = "{\"disks\":$disks}" // common.parseJSON() can't parse a list of maps
    disks = common.parseJSON(disks)['disks']
    for(disk in disks) {
        for(partition in disk.get('partitions')) {
            def orphan = false
            if(partition.get('type') == 'block.db' && !partition.containsKey('block.db_for')) { orphan = true }
            else if(partition.get('type') == 'block' && !partition.containsKey('block_for')) { orphan = true }
            else if(partition.get('type') == 'data' && partition.get('state') != 'active') { orphan = true }
            // TODO: test for the rest of types

            if(orphan) {
                if(partition.get('path')) {
                    removePartition(master, target, partition['path'], partition['type'], wipePartitions)
                }
                else {
                    common.warningMsg("Found orphan partition on $target but failed to remove it.")
                }
            }
        }
    }
    cmdRunOnTarget(master, target, "partprobe", false)
}
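
// Usage sketch (assumption, 'osd001*' is illustrative): clean up leftover
// ceph-disk partitions on a storage node once its OSDs have been purged.
//
//     removeOrphans(saltMaster, 'osd001*')        // remove partitions, wipe only headers
//     removeOrphans(saltMaster, 'osd001*', true)  // additionally zero the partitions completely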

/**
 * Ceph remove partition
 *
 * @param master Salt connection object
 * @param target Target specification, compliance to compound matcher in salt
 * @param partition Partition to remove on target host
 * @param type Type of partition. Some partitions need additional steps (optional, default empty string)
 * @param fullWipe Fill the entire partition with zeros (optional, default false)
 */
def removePartition(master, target, partition, type='', fullWipe=false) {
    def common = new Common()
    def salt = new Salt()

    def dev = ''
    def part_id = ''
    def partitionID = ''
    def disk = ''
    def wipeCmd = ''
    def lvm_enabled = getPillar(master, target, "ceph:osd:lvm_enabled")

    if(!partition?.trim()) {
        throw new Exception("Can't proceed without defined partition.")
    }
    cmdRunOnTarget(master, target, "test -b $partition")

    if(fullWipe) { wipeCmd = "dd if=/dev/zero of=$partition bs=1M 2>/dev/null" }
    else { wipeCmd = "dd if=/dev/zero of=$partition bs=1M count=100 2>/dev/null" }

    common.infoMsg("Removing from the cluster $type partition $partition on $target.")
    if(type == 'lockbox') {
        try {
            partition = cmdRunOnTarget(master, target, "lsblk -rp | grep -v mapper | grep $partition", false)
            cmdRunOnTarget(master, target, "umount $partition")
        }
        catch (Exception e) {
            common.warningMsg(e)
        }
    }
    else if(type == 'data') {
        cmdRunOnTarget(master, target, "umount $partition 2>/dev/null", false)
        cmdRunOnTarget(master, target, wipeCmd, false)
    }
    else if(type == 'block' || fullWipe) {
        cmdRunOnTarget(master, target, wipeCmd, false)
    }
    try {
        partitionID = cmdRunOnTarget(master, target, "cat /sys/dev/block/`lsblk $partition -no MAJ:MIN | xargs`/partition", false)
        disk = cmdRunOnTarget(master, target, "lsblk $partition -no pkname", false)
    }
    catch (Exception e) {
        common.errorMsg("Couldn't get disk name or partition number for $partition")
        common.warningMsg(e)
    }
    try {
        cmdRunOnTarget(master, target, "sgdisk -d$partitionID /dev/$disk", true, true)
    }
    catch (Exception e) {
        common.warningMsg("Did not find any device to be wiped.")
        common.warningMsg(e)
    }
    // try to remove partition table if the disk has no partitions left - required by ceph-volume
    cmdRunOnTarget(master, target, "partprobe -d -s /dev/$disk | grep partitions\$ && sgdisk -Z /dev/$disk", false, true)
}
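
// Usage sketch (assumption, device paths and target are illustrative): remove
// leftover partitions of a purged OSD directly.
//
//     removePartition(saltMaster, 'osd001*', '/dev/sdb1')                 // e.g. an old journal
//     removePartition(saltMaster, 'osd001*', '/dev/sdb2', 'block', true)  // block partition, full zero wipe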