/**
 *
 * Remove Ceph node from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *  HOST                        Host (minion id) to be removed
 *  HOST_TYPE                   Type of Ceph node to be removed. Valid values are mon/osd/rgw
 *  ADMIN_HOST                  Host (minion id) with admin keyring
 *  WAIT_FOR_HEALTHY            Wait for the cluster to rebalance before stopping daemons
 *  GENERATE_CRUSHMAP           Set to true if the crush map should be generated
 *
 */

common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
orchestrate = new com.mirantis.mk.Orchestrate()
def python = new com.mirantis.mk.Python()

def pepperEnv = "pepperEnv"

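// helper: run an arbitrary shell command on the given target minion via Salt cmd.run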
def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

node("python") {

    // create connection to salt master
    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

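    // validate HOST_TYPE before doing anything destructive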
    matches = ["osd", "mon", "rgw"]
    def found = false
    for (s in matches) {
        if (HOST_TYPE.toLowerCase() == s) {
            found = true
        }
    }

    if (!found) {
        common.errorMsg("No such HOST_TYPE was found. Please insert one of the following types: mon/osd/rgw")
        return
    }
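    // refresh pillar on all minions so the pillar-based targeting (I@...) below uses current data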

    stage('Refresh_pillar') {
        salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 5)
    }

    // split minion id on '.' and remove '*'
    def target = HOST.split("\\.")[0].replace("*", "")

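    // the cluster domain from the Salt master grains is needed to build the node's FQDN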
    def _pillar = salt.getGrain(pepperEnv, 'I@salt:master', 'domain')
    domain = _pillar['return'][0].values()[0].values()[0]

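    // rgw only: re-apply keepalived and haproxy states on the radosgw nodes so the load-balancer configuration is rebuilt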
    if (HOST_TYPE.toLowerCase() == 'rgw') {
        // Remove Ceph rgw
        stage('Remove Ceph RGW') {
            salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
        }
    }

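    // mon/rgw nodes are VMs managed by salt:control; find the KVM hypervisor hosting the VM and delete it there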
    if (HOST_TYPE.toLowerCase() != 'osd') {

        // virsh destroy rgw04.deploy-name.local; virsh undefine rgw04.deploy-name.local;
        stage('Destroy VM') {
            _pillar = salt.getGrain(pepperEnv, 'I@salt:control', 'id')
            def kvm01 = _pillar['return'][0].values()[0].values()[0]

            _pillar = salt.getPillar(pepperEnv, "${kvm01}", "salt:control:cluster:internal:node:${target}:provider")
            def targetProvider = _pillar['return'][0].values()[0]

            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh destroy ${target}.${domain}")
            salt.cmdRun(pepperEnv, "${targetProvider}", "virsh undefine ${target}.${domain}")
        }
    } else if (HOST_TYPE.toLowerCase() == 'osd') {
        def osd_ids = []

        // get list of osd disks of the host
        def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']

        for (i in ceph_disks) {
            def osd_id = i.getKey().toString()
            osd_ids.add('osd.' + osd_id)
            print("Will delete " + osd_id)
        }

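        // marking the OSDs out triggers data migration off these OSDs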
        // `ceph osd out <id> <id>`
        stage('Set OSDs out') {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
        }

        // wait for healthy cluster
        if (WAIT_FOR_HEALTHY.toBoolean() == true) {
            stage('Waiting for healthy cluster') {
                while (true) {
                    def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
                    if (health.contains('HEALTH_OK')) {
                        common.infoMsg('Cluster is healthy')
                        break;
                    }
                    sleep(10)
                }
            }
        }

        // stop osd daemons
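        // the systemd unit is ceph-osd@<numeric id>, so strip the 'osd.' prefix from the id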
        stage('Stop OSD daemons') {
            for (i in osd_ids) {
                salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replace('osd.', '')], null, true)
            }
        }

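        // remove the OSDs from the CRUSH map so they are no longer considered for data placement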
        // `ceph osd crush remove osd.2`
        stage('Remove OSDs from CRUSH') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
            }
        }

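        // delete each OSD's cephx key so the daemon can no longer authenticate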
        // remove keyring `ceph auth del osd.3`
        stage('Remove OSD keyrings from auth') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
            }
        }

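        // finally drop the OSD ids from the cluster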
        // remove osd `ceph osd rm osd.3`
        stage('Remove OSDs') {
            for (i in osd_ids) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
            }
        }

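        // with all OSDs removed, the Ceph packages can be purged from the host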
        // purge Ceph pkgs
        stage('Purge Ceph OSD pkgs') {
            runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd libcephfs2 python-cephfs librados2 python-rados -y')
        }

        // stop salt-minion service and move its configuration
        stage('Stop salt-minion') {
            salt.cmdRun(pepperEnv, HOST, "mv /etc/salt/minion.d/minion.conf minion.conf")
            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['salt-minion'], null, true, 5)
        }
    }

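    // delete the minion key and the generated reclass node definition from the Salt master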
    stage('Remove salt-key') {
        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
        } catch (Exception e) {
            common.warningMsg(e)
        }
        try {
            salt.cmdRun(pepperEnv, 'I@salt:master', "rm /srv/salt/reclass/nodes/_generated/${target}.${domain}.yml")
        } catch (Exception e) {
            common.warningMsg(e)
        }
    }

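    // mon only: remove the monitor from the monmap and redistribute ceph.conf to all nodes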
    if (HOST_TYPE.toLowerCase() == 'mon') {
        // Update Monmap
        stage('Update monmap') {
            runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon getmap -o /tmp/monmap.backup")
            try {
                runCephCommand(pepperEnv, 'I@ceph:mon', "ceph mon remove ${target}")
            } catch (Exception e) {
                common.warningMsg(e)
            }
            runCephCommand(pepperEnv, 'I@ceph:mon', "monmaptool /tmp/monmap.backup --rm ${target}")
        }

        // Update configs
        stage('Update Ceph configs') {
            salt.enforceState(pepperEnv, 'I@ceph:common', 'ceph.common', true)
        }
    }

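    // optionally re-generate the CRUSH map now that the OSD node is gone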
    if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
        stage('Generate CRUSHMAP') {
            salt.enforceState(pepperEnv, 'I@ceph:setup:crush', 'ceph.setup.crush', true)
        }
    }
}