blob: 0fba6a074f598d8125aa6e1ec9a59a859688375b [file] [log] [blame]
/**
 *
 * Remove Ceph node from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL                 URL of Salt master
 *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
 *  HOST                            Host (minion id) to be removed
 *  HOST_TYPE                       Type of Ceph node to be removed. Valid values are mon/osd/rgw
 *  ADMIN_HOST                      Host (minion id) with admin keyring
 *  WAIT_FOR_HEALTHY                Wait for cluster rebalance before stopping daemons
 *  GENERATE_CRUSHMAP               Set to true if the crush map should be generated
 *
 */
15
16common = new com.mirantis.mk.Common()
17salt = new com.mirantis.mk.Salt()
18orchestrate = new com.mirantis.mk.Orchestrate()
19def python = new com.mirantis.mk.Python()
20
21def pepperEnv = "pepperEnv"
22
Jiri Broulikeb7b82f2017-11-30 13:55:40 +010023def removePartition(master, target, partition_uuid) {
24 def partition = ""
25 try {
26 // partition = /dev/sdi2
27 partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
28 } catch (Exception e) {
29 common.warningMsg(e)
30 }
31
32 if (partition?.trim()) {
33 // dev = /dev/sdi
34 def dev = partition.replaceAll('\\d+$', "")
35 // part_id = 2
36 def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
37 runCephCommand(master, target, "parted ${dev} rm ${part_id}")
38 }
39 return
40}
41
Jiri Broulik99887c82017-10-31 09:27:52 +010042def runCephCommand(master, target, cmd) {
43 return salt.cmdRun(master, target, cmd)
44}
45
Jiri Broulik96c867a2017-11-07 16:14:10 +010046def waitForHealthy(master, count=0, attempts=300) {
47 // wait for healthy cluster
48 while (count<attempts) {
49 def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
50 if (health.contains('HEALTH_OK')) {
51 common.infoMsg('Cluster is healthy')
52 break;
53 }
54 count++
55 sleep(10)
56 }
57}
Jakub Josefa63f9862018-01-11 17:58:38 +010058timeout(time: 12, unit: 'HOURS') {
59 node("python") {
Jiri Broulik96c867a2017-11-07 16:14:10 +010060
Jakub Josefa63f9862018-01-11 17:58:38 +010061 // create connection to salt master
62 python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
Jiri Broulik99887c82017-10-31 09:27:52 +010063
Jakub Josefa63f9862018-01-11 17:58:38 +010064 matches = ["osd", "mon", "rgw"]
65 def found = false
66 for (s in matches) {
67 if (HOST_TYPE.toLowerCase() == s) {
68 found = true
Jiri Broulik99887c82017-10-31 09:27:52 +010069 }
70 }
71
Jakub Josefa63f9862018-01-11 17:58:38 +010072 if (!found) {
73 common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
74 throw new InterruptedException()
75 }
76
77 stage('Refresh_pillar') {
78 salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
79 }
80
81 // split minion id on '.' and remove '*'
82 def target = HOST.split("\\.")[0].replace("*", "")
83
84 salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.sync_grains', [], null, true, 5)
85 def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
86 domain = _pillar['return'][0].values()[0].values()[0]
87
88 if (HOST_TYPE.toLowerCase() == 'rgw') {
89 // Remove Ceph rgw
90 stage('Remove Ceph RGW') {
91 salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
Jiri Broulik99887c82017-10-31 09:27:52 +010092 }
Mateusz Los9f503772019-05-07 15:10:45 +020093
94 stage('Purge Ceph RGW pkgs') {
95 salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
96 }
Jiri Broulik99887c82017-10-31 09:27:52 +010097 }
98
Jakub Josefa63f9862018-01-11 17:58:38 +010099 if (HOST_TYPE.toLowerCase() != 'osd') {
100
101 // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
102 stage('Destroy/Undefine VM') {
103 _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
104 def kvm01 = _pillar['return'][0].values()[0].values()[0]
105
106 _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
107 def targetProvider = _pillar['return'][0].values()[0]
108
109 salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
110 salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
111 }
112 } else if (HOST_TYPE.toLowerCase() == 'osd') {
113 def osd_ids = []
114
115 // get list of osd disks of the host
116 salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
117 def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
118
119 for (i in ceph_disks) {
120 def osd_id = i.getKey().toString()
121 osd_ids.add('osd.' + osd_id)
122 print("Will delete " + osd_id)
123 }
124
125 // `ceph osd out <id> <id>`
126 stage('Set OSDs out') {
127 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
128 }
129
130 // wait for healthy cluster
131 if (WAIT_FOR_HEALTHY.toBoolean() == true) {
132 sleep(5)
133 waitForHealthy(pepperEnv)
134 }
135
136 // stop osd daemons
137 stage('Stop OSD daemons') {
138 for (i in osd_ids) {
139 salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
140 }
141 }
142
143 // `ceph osd crush remove osd.2`
144 stage('Remove OSDs from CRUSH') {
145 for (i in osd_ids) {
146 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
147 }
148 }
149
150 // remove keyring `ceph auth del osd.3`
151 stage('Remove OSD keyrings from auth') {
152 for (i in osd_ids) {
153 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
154 }
155 }
156
157 // remove osd `ceph osd rm osd.3`
158 stage('Remove OSDs') {
159 for (i in osd_ids) {
160 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
161 }
162 }
163
164 for (osd_id in osd_ids) {
165
166 id = osd_id.replaceAll('osd.', '')
167 def dmcrypt = ""
168 try {
169 dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
170 } catch (Exception e) {
171 common.warningMsg(e)
172 }
173
174 if (dmcrypt?.trim()) {
175 mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
176 dev = mount.split()[0].replaceAll("[0-9]","")
177
178 // remove partition tables
179 stage("dd part table on ${dev}") {
180 runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
181 }
182
183 }
184 // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
185 stage('Remove journal / block_db / block_wal partition') {
186 def partition_uuid = ""
187 def journal_partition_uuid = ""
188 def block_db_partition_uuid = ""
189 def block_wal_partition_uuid = ""
190 try {
191 journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
192 journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
193 } catch (Exception e) {
194 common.infoMsg(e)
195 }
196 try {
197 block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
198 block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
199 } catch (Exception e) {
200 common.infoMsg(e)
201 }
202
203 try {
204 block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
205 block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
206 } catch (Exception e) {
207 common.infoMsg(e)
208 }
209
210 // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
211 if (journal_partition_uuid?.trim()) {
212 partition_uuid = journal_partition_uuid
213 } else if (block_db_partition_uuid?.trim()) {
214 partition_uuid = block_db_partition_uuid
215 }
216
217 // if disk has journal, block_db or block_wal on different disk, then remove the partition
218 if (partition_uuid?.trim()) {
219 removePartition(pepperEnv, HOST, partition_uuid)
220 }
221 if (block_wal_partition_uuid?.trim()) {
222 removePartition(pepperEnv, HOST, block_wal_partition_uuid)
223 }
224 }
225 }
226
227 // purge Ceph pkgs
228 stage('Purge Ceph OSD pkgs') {
Mateusz Los9f503772019-05-07 15:10:45 +0200229 salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
Jakub Josefa63f9862018-01-11 17:58:38 +0100230 }
231
232 stage('Remove OSD host from crushmap') {
233 def hostname = runCephCommand(pepperEnv, HOST, "hostname -s")['return'][0].values()[0].split('\n')[0]
234 try {
235 runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd crush remove ${hostname}")
236 } catch (Exception e) {
237 common.warningMsg(e)
238 }
239 }
240
241 // stop salt-minion service and move its configuration
242 stage('Stop salt-minion') {
243 salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
244 salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], [], null, true, 5)
Jiri Broulik99887c82017-10-31 09:27:52 +0100245 }
246 }
247
Jakub Josefa63f9862018-01-11 17:58:38 +0100248 stage('Remove salt-key') {
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100249 try {
Jakub Josefa63f9862018-01-11 17:58:38 +0100250 salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100251 } catch (Exception e) {
252 common.warningMsg(e)
253 }
Jiri Broulikc7ec65e2017-11-30 16:55:58 +0100254 try {
Jakub Josefa63f9862018-01-11 17:58:38 +0100255 salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
Jiri Broulikc7ec65e2017-11-30 16:55:58 +0100256 } catch (Exception e) {
257 common.warningMsg(e)
258 }
259 }
260
Jakub Josefa63f9862018-01-11 17:58:38 +0100261 stage('Remove keyring') {
262 def keyring = ""
263 def keyring_lines = ""
Jiri Broulik99887c82017-10-31 09:27:52 +0100264 try {
Jakub Josefa63f9862018-01-11 17:58:38 +0100265 keyring_lines = runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth list | grep ${target}")['return'][0].values()[0].split('\n')
Jiri Broulik99887c82017-10-31 09:27:52 +0100266 } catch (Exception e) {
267 common.warningMsg(e)
268 }
Jakub Josefa63f9862018-01-11 17:58:38 +0100269 for (line in keyring_lines) {
270 if (line.toLowerCase().contains(target.toLowerCase())) {
271 keyring = line
272 break
273 }
274 }
275 if (keyring?.trim()) {
276 runCephCommand(pepperEnv, ADMIN_HOST, "ceph auth del ${keyring}")
Jiri Broulik3485b2c2017-11-28 15:06:12 +0100277 }
Jiri Broulik99887c82017-10-31 09:27:52 +0100278 }
Jiri Broulik99887c82017-10-31 09:27:52 +0100279
Jakub Josefa63f9862018-01-11 17:58:38 +0100280 if (HOST_TYPE.toLowerCase() == 'mon') {
281 // Update Monmap
282 stage('Update monmap') {
283 runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o monmap.backup")
284 try {
285 runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
286 } catch (Exception e) {
287 common.warningMsg(e)
288 }
289 runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap --rm ${target}")
290 }
291
292 def target_hosts = salt.getMinions(pepperEnv, 'I@ceph:common')
293 print target_hosts
294
295 // Update configs
296 stage('Update Ceph configs') {
297 for (tgt in target_hosts) {
298 salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
299 }
300 }
Mateusz Los9f503772019-05-07 15:10:45 +0200301
302 stage('Purge Ceph MON pkgs') {
303 salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
304 }
Jakub Josefa63f9862018-01-11 17:58:38 +0100305 }
306
307 if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
308 stage('Generate CRUSHMAP') {
309 salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
310 }
Jiri Broulik99887c82017-10-31 09:27:52 +0100311 }
312 }
313}