/**
 *
 * Remove OSD from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *
 *  HOST                        Host (minion id) to be removed
 *  OSD                         Comma separated list of osd ids to be removed
 *  ADMIN_HOST                  Host (minion id) with admin keyring
 *  CLUSTER_FLAGS               Comma separated list of flags to apply to cluster
 *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
 *
 */
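// Illustrative parameter values (hypothetical, adjust to your deployment;
// OSD also accepts '*' to match every OSD reported by the host's grains):
//   HOST             = 'osd001*'
//   OSD              = '1,2'
//   ADMIN_HOST       = 'cmn01*'
//   CLUSTER_FLAGS    = 'noout'
//   WAIT_FOR_HEALTHY = 'true'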

common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()

def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')

def removePartition(master, target, partition_uuid) {
    def partition = ""
    try {
        // partition = /dev/sdi2
        partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
    } catch (Exception e) {
        common.warningMsg(e)
    }

    if (partition?.trim()) {
        // dev = /dev/sdi
        def dev = partition.replaceAll('\\d+$', "")
        // part_id = 2
        def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
        runCephCommand(master, target, "parted ${dev} rm ${part_id}")
    }
    return
}
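// Worked example for removePartition() above (illustrative values): if blkid
// prints '/dev/sdi2: PARTUUID="2c76f144-f412-481e-b150-4046212ca932"', the
// split yields partition = /dev/sdi2, hence dev = /dev/sdi and part_id = 2,
// and the partition is dropped with `parted /dev/sdi rm 2`.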

// Thin wrapper: run a shell command on the target minion via the Salt API
// and return the structured Salt response.
def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

def waitForHealthy(master, count=0, attempts=300) {
    // wait for healthy cluster
    while (count<attempts) {
        def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            break
        }
        count++
        sleep(10)
    }
}
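// waitForHealthy() polls `ceph health` every 10 s, so the default 300 attempts
// give the cluster roughly 50 minutes to settle; note the loop returns without
// raising an error even if HEALTH_OK is never reached.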

node("python") {

    // create connection to salt master
    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

    if (flags.size() > 0) {
        stage('Set cluster flags') {
            for (flag in flags) {
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
            }
        }
    }

    def osd_ids = []

    // get list of osd disks of the host
    salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
    def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
    common.prettyPrint(ceph_disks)

    for (i in ceph_disks) {
        def osd_id = i.getKey().toString()
        if (osd_id in osds || OSD == '*') {
            osd_ids.add('osd.' + osd_id)
            print("Will delete " + osd_id)
        } else {
            print("Skipping " + osd_id)
        }
    }
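
    // At this point osd_ids holds the selected ids in daemon form, e.g.
    // ['osd.1', 'osd.2'], as expected by the ceph commands below.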

    // wait for healthy cluster
    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
        waitForHealthy(pepperEnv)
    }

    // `ceph osd out <id> <id>`
    stage('Set OSDs out') {
        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
    }

    // wait for healthy cluster
    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
        sleep(5)
        waitForHealthy(pepperEnv)
    }

    // stop osd daemons
    stage('Stop OSD daemons') {
        for (i in osd_ids) {
            salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
        }
    }

    // `ceph osd crush remove osd.2`
    stage('Remove OSDs from CRUSH') {
        for (i in osd_ids) {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
        }
    }

    // remove keyring `ceph auth del osd.3`
    stage('Remove OSD keyrings from auth') {
        for (i in osd_ids) {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
        }
    }

    // remove osd `ceph osd rm osd.3`
    stage('Remove OSDs') {
        for (i in osd_ids) {
            runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
        }
    }

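    // Per-OSD disk cleanup: wipe the partition table of dmcrypt-backed data
    // disks and delete any journal / block.db / block.wal partitions that
    // live on a separate device.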
    for (osd_id in osd_ids) {

        id = osd_id.replaceAll('osd.', '')
        def dmcrypt = ""
        try {
            dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
        } catch (Exception e) {
            common.warningMsg(e)
        }

        if (dmcrypt?.trim()) {
            // `grep -B1` also returns the line above the mounted ceph dir, i.e.
            // the underlying data partition (e.g. /dev/sdi1); stripping the
            // digits from its first token yields the whole disk (e.g. /dev/sdi)
            mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
            dev = mount.split()[0].replaceAll("[0-9]","")

            // remove partition tables
            stage("dd part table on ${dev}") {
                runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
            }

        }
        // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
        stage('Remove journal / block_db / block_wal partition') {
            def partition_uuid = ""
            def journal_partition_uuid = ""
            def block_db_partition_uuid = ""
            def block_wal_partition_uuid = ""
            try {
                journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
                journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
            } catch (Exception e) {
                common.infoMsg(e)
            }
            try {
                block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
                block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
            } catch (Exception e) {
                common.infoMsg(e)
            }

            try {
                block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
                block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
            } catch (Exception e) {
                common.infoMsg(e)
            }
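
            // Illustrative output for the greps above: a symlink such as
            //   journal -> /dev/disk/by-partuuid/2c76f144-f412-481e-b150-4046212ca932
            // is reduced by the substring() call to the trailing partition uuid.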

            // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
            if (journal_partition_uuid?.trim()) {
                partition_uuid = journal_partition_uuid
            } else if (block_db_partition_uuid?.trim()) {
                partition_uuid = block_db_partition_uuid
            }

            // if disk has journal, block_db or block_wal on different disk, then remove the partition
            if (partition_uuid?.trim()) {
                removePartition(pepperEnv, HOST, partition_uuid)
            }
            if (block_wal_partition_uuid?.trim()) {
                removePartition(pepperEnv, HOST, block_wal_partition_uuid)
            }
        }
    }
    // remove cluster flags
    if (flags.size() > 0) {
        stage('Unset cluster flags') {
            for (flag in flags) {
                common.infoMsg('Removing flag ' + flag)
                runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
            }
        }
    }
}