/**
 *
 * Remove Ceph node from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *  HOST                        Host (minion id) to be removed
 *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
 *  ADMIN_HOST                  Host (minion id) with admin keyring
 *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
 *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
 *
 */

common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
def python = new com.mirantis.mk.Python()

def pepperEnv = "pepperEnv"

def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

def waitForHealthy(master, count=0, attempts=300) {
    // wait for healthy cluster
    while (count < attempts) {
        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            break
        }
        count++
        sleep(10)
    }
}

node("python") {

    // create connection to salt master
    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

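    // validate HOST_TYPE against the supported node roles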
    matches = ["osd", "mon", "rgw"]
    def found = false
    for (s in matches) {
        if (HOST_TYPE.toLowerCase() == s) {
            found = true
        }
    }

    if (!found) {
        common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
        currentBuild.result = 'FAILURE'
        return
    }

    stage('Refresh_pillar') {
        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
    }

    // split minion id on '.' and remove '*'
    def target = HOST.split("\\.")[0].replace("*", "")

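    // read the cluster domain from the Salt master grains; it is used below to build the node FQDN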
    def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
    domain = _pillar['return'][0].values()[0].values()[0]

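    // for an rgw node, re-apply the keepalived and haproxy states on the radosgw minions
    // so the load-balancer configuration is refreshed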
    if (HOST_TYPE.toLowerCase() == 'rgw') {
        // Remove Ceph rgw
        stage('Remove Ceph RGW') {
            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
        }
    }

    if (HOST_TYPE.toLowerCase() != 'osd') {

        // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
        stage('Destroy VM') {
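            // find the KVM host that runs the target VM, then destroy and undefine it there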
            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
            def kvm01 = _pillar['return'][0].values()[0].values()[0]

            _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
            def targetProvider = _pillar['return'][0].values()[0]

            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
        }
    } else if (HOST_TYPE.toLowerCase() == 'osd') {
        def osd_ids = []

        // get list of osd disks of the host
        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']

        for (i in ceph_disks) {
            def osd_id = i.getKey().toString()
            osd_ids.add('osd.' + osd_id)
            print("Will delete " + osd_id)
        }

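        // mark the OSDs out so Ceph starts rebalancing their data to the rest of the cluster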
        // `ceph osd out <id> <id>`
        stage('Set OSDs out') {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
        }

        // wait for healthy cluster
        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
            sleep(5)
            waitForHealthy(pepperEnv)
        }

        // stop osd daemons
        stage('Stop OSD daemons') {
            for (i in osd_ids) {
                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
            }
        }

        // `ceph osd crush remove osd.2`
        stage('Remove OSDs from CRUSH') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
            }
        }

        // remove keyring `ceph auth del osd.3`
        stage('Remove OSD keyrings from auth') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
            }
        }

        // remove osd `ceph osd rm osd.3`
        stage('Remove OSDs') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
            }
        }

        // purge Ceph pkgs
        stage('Purge Ceph OSD pkgs') {
            runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd libcephfs2 python-cephfs librados2 python-rados -y')
        }

        // stop salt-minion service and move its configuration
        stage('Stop salt-minion') {
            salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], null, true, 5)
        }
    }

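    // delete the minion key on the Salt master and drop the generated reclass node definition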
    stage('Remove salt-key') {
        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
        } catch (Exception e) {
            common.warningMsg(e)
        }
        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
        } catch (Exception e) {
            common.warningMsg(e)
        }
    }

    if (HOST_TYPE.toLowerCase() == 'mon') {
        // Update Monmap
        stage('Update monmap') {
            runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o /tmp/monmap.backup")
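            // the monitor may already be gone from the quorum; tolerate a failed removal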
            try {
                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
            } catch (Exception e) {
                common.warningMsg(e)
            }
            runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap.backup --rm ${target}")
        }

        // Update configs
        stage('Update Ceph configs') {
            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
        }
    }

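    // re-apply the CRUSH setup state so the generated CRUSH map no longer contains the removed OSDs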
    if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
        stage('Generate CRUSHMAP') {
            salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
        }
    }
}