/**
 *
 * Remove Ceph node from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *  HOST                        Host (minion id) to be removed
 *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
 *  ADMIN_HOST                  Host (minion id) with admin keyring
 *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
 *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
 *
 */

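/**
 * Example parameter values (illustrative only; the minion ids below are
 * hypothetical and must match your own deployment):
 *
 *  SALT_MASTER_URL         = 'http://cfg01.example.local:6969'
 *  SALT_MASTER_CREDENTIALS = 'salt-credentials'
 *  HOST                    = 'osd005*'
 *  HOST_TYPE               = 'osd'
 *  ADMIN_HOST              = 'cmn01*'
 *  WAIT_FOR_HEALTHY        = 'true'
 *  GENERATE_CRUSHMAP       = 'false'
 */
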
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
def python = new com.mirantis.mk.Python()

def pepperEnv = "pepperEnv"

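// thin wrapper around salt.cmdRun so every Ceph CLI call goes through one place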
def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

node("python") {

    // create connection to salt master
    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

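    // validate HOST_TYPE against the supported node roles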
    matches = ["osd", "mon", "rgw"]
    def found = false
    for (s in matches) {
        if (HOST_TYPE.toLowerCase() == s) {
            found = true
        }
    }

    if (!found) {
        common.errorMsg("No such HOST_TYPE was found. Please use one of the following types: mon/osd/rgw")
        throw new InterruptedException()
    }

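    // refresh pillar data on all minions so the removal runs against current metadata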
    stage('Refresh_pillar') {
        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
    }

    // split minion id on '.' and remove '*'
    def target = HOST.split("\\.")[0].replace("*", "")

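    // read the cluster domain from the Salt master grains to build the node FQDN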
    def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
    domain = _pillar['return'][0].values()[0].values()[0]

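    // For RGW nodes, re-apply keepalived and haproxy on the remaining radosgw
    // hosts so the node being removed drops out of the load-balancer configuration.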
    if (HOST_TYPE.toLowerCase() == 'rgw') {
        // Remove Ceph rgw
        stage('Remove Ceph RGW') {
            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
        }
    }

    if (HOST_TYPE.toLowerCase() != 'osd') {

        // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
        stage('Destroy VM') {
            // pick a salt:control (KVM) node, then look up which provider hosts the target VM
            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
            def kvm01 = _pillar['return'][0].values()[0].values()[0]

            _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
            def targetProvider = _pillar['return'][0].values()[0]

            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
        }
    } else if (HOST_TYPE.toLowerCase() == 'osd') {
        def osd_ids = []

        // get list of osd disks of the host
        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']

        for (i in ceph_disks) {
            def osd_id = i.getKey().toString()
            osd_ids.add('osd.' + osd_id)
            print("Will delete " + osd_id)
        }

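        // mark the OSDs as out so Ceph starts migrating their data to the rest of the cluster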
        // `ceph osd out <id> <id>`
        stage('Set OSDs out') {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
        }

        // wait for healthy cluster
        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
            stage('Waiting for healthy cluster') {
                sleep(5)
                while (true) {
                    def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
                    if (health.contains('HEALTH_OK')) {
                        common.infoMsg('Cluster is healthy')
                        break
                    }
                    sleep(10)
                }
            }
        }

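        // systemd units are named ceph-osd@<numeric id>, so the 'osd.' prefix is stripped below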
        // stop osd daemons
        stage('Stop OSD daemons') {
            for (i in osd_ids) {
                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
            }
        }

        // `ceph osd crush remove osd.2`
        stage('Remove OSDs from CRUSH') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
            }
        }

        // remove keyring `ceph auth del osd.3`
        stage('Remove OSD keyrings from auth') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
            }
        }

        // remove osd `ceph osd rm osd.3`
        stage('Remove OSDs') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
            }
        }

        // purge Ceph pkgs
        stage('Purge Ceph OSD pkgs') {
            runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd libcephfs2 python-cephfs librados2 python-rados -y')
        }

        // stop salt-minion service and move its configuration out of minion.d so the node stops reporting in
        stage('Stop salt-minion') {
            salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], null, true, 5)
        }
    }

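    // delete the minion key on the Salt master and drop the generated reclass node definition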
    stage('Remove salt-key') {
        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
        } catch (Exception e) {
            common.warningMsg(e)
        }
        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
        } catch (Exception e) {
            common.warningMsg(e)
        }
    }

    if (HOST_TYPE.toLowerCase() == 'mon') {
        // Update Monmap
        stage('Update monmap') {
            // keep a local copy of the current monmap before removing the monitor
            runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o /tmp/monmap.backup")
            try {
                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
            } catch (Exception e) {
                common.warningMsg(e)
            }
            runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap.backup --rm ${target}")
        }

        // Update configs
        stage('Update Ceph configs') {
            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
        }
    }

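    // optionally re-render the CRUSH map via the ceph.setup.crush state once the OSD node is gone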
    if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
        stage('Generate CRUSHMAP') {
            salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
        }
    }
}