blob: 71946b7b670e7b8213d34a1c0405031e1c0b65d6 [file] [log] [blame]
/**
 *
 * Remove OSD from existing cluster
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *
 *  HOST                        Host (minion id) to be removed
 *  OSD                         Comma separated list of osd ids to be removed
 *  ADMIN_HOST                  Host (minion id) with admin keyring
 *  CLUSTER_FLAGS               Comma separated list of flags to apply to cluster
 *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
 *
 */

16
17common = new com.mirantis.mk.Common()
18salt = new com.mirantis.mk.Salt()
chnyda625f4b42017-10-11 14:10:31 +020019def python = new com.mirantis.mk.Python()
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020020
chnyda625f4b42017-10-11 14:10:31 +020021def pepperEnv = "pepperEnv"
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020022def flags = CLUSTER_FLAGS.tokenize(',')
Tomáš Kukrál9d6228b2017-08-15 16:54:55 +020023def osds = OSD.tokenize(',')
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020024
Jiri Broulikeb7b82f2017-11-30 13:55:40 +010025def removePartition(master, target, partition_uuid) {
26 def partition = ""
27 try {
28 // partition = /dev/sdi2
29 partition = runCephCommand(master, target, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
30 } catch (Exception e) {
31 common.warningMsg(e)
32 }
33
34 if (partition?.trim()) {
35 // dev = /dev/sdi
36 def dev = partition.replaceAll('\\d+$', "")
37 // part_id = 2
38 def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
39 runCephCommand(master, target, "parted ${dev} rm ${part_id}")
40 }
41 return
42}
43
44def runCephCommand(master, target, cmd) {
45 return salt.cmdRun(master, target, cmd)
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020046}
47
Jiri Broulik96c867a2017-11-07 16:14:10 +010048def waitForHealthy(master, count=0, attempts=300) {
49 // wait for healthy cluster
50 while (count<attempts) {
51 def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
52 if (health.contains('HEALTH_OK')) {
53 common.infoMsg('Cluster is healthy')
54 break;
55 }
56 count++
57 sleep(10)
58 }
59}
Jakub Josefa63f9862018-01-11 17:58:38 +010060timeout(time: 12, unit: 'HOURS') {
61 node("python") {
Jiri Broulik96c867a2017-11-07 16:14:10 +010062
Jakub Josefa63f9862018-01-11 17:58:38 +010063 // create connection to salt master
64 python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020065
Jakub Josefa63f9862018-01-11 17:58:38 +010066 if (flags.size() > 0) {
67 stage('Set cluster flags') {
68 for (flag in flags) {
69 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
70 }
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020071 }
72 }
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020073
Jakub Josefa63f9862018-01-11 17:58:38 +010074 def osd_ids = []
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020075
Jakub Josefa63f9862018-01-11 17:58:38 +010076 // get list of osd disks of the host
77 salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.sync_grains', [], null, true, 5)
Jakub Josefed670ca2018-01-18 14:22:20 +010078 def cephGrain = salt.getGrain(pepperEnv, HOST, 'ceph')['return']
79 if(cephGrain['return'].isEmpty()){
80 throw new Exception("Ceph salt grain cannot be found!")
81 }
82 def ceph_disks = cephGrain['return'][0].values()[0].values()[0]['ceph_disk']
Jakub Josefa63f9862018-01-11 17:58:38 +010083 common.prettyPrint(ceph_disks)
Jiri Broulikadc7ecd2017-10-18 06:59:27 +020084
Jakub Josefa63f9862018-01-11 17:58:38 +010085 for (i in ceph_disks) {
86 def osd_id = i.getKey().toString()
87 if (osd_id in osds || OSD == '*') {
88 osd_ids.add('osd.' + osd_id)
89 print("Will delete " + osd_id)
90 } else {
91 print("Skipping " + osd_id)
Jiri Broulikeb7b82f2017-11-30 13:55:40 +010092 }
Jiri Broulikeb7b82f2017-11-30 13:55:40 +010093 }
Jakub Josefa63f9862018-01-11 17:58:38 +010094
95 // wait for healthy cluster
Jakub Josefed670ca2018-01-18 14:22:20 +010096 if (WAIT_FOR_HEALTHY.toBoolean()) {
Jakub Josefa63f9862018-01-11 17:58:38 +010097 waitForHealthy(pepperEnv)
98 }
99
100 // `ceph osd out <id> <id>`
101 stage('Set OSDs out') {
102 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd out ' + osd_ids.join(' '))
103 }
104
105 // wait for healthy cluster
Jakub Josefed670ca2018-01-18 14:22:20 +0100106 if (WAIT_FOR_HEALTHY.toBoolean()) {
Jakub Josefa63f9862018-01-11 17:58:38 +0100107 sleep(5)
108 waitForHealthy(pepperEnv)
109 }
110
111 // stop osd daemons
112 stage('Stop OSD daemons') {
113 for (i in osd_ids) {
114 salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
115 }
116 }
117
118 // `ceph osd crush remove osd.2`
119 stage('Remove OSDs from CRUSH') {
120 for (i in osd_ids) {
121 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd crush remove ' + i)
122 }
123 }
124
125 // remove keyring `ceph auth del osd.3`
126 stage('Remove OSD keyrings from auth') {
127 for (i in osd_ids) {
128 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + i)
129 }
130 }
131
132 // remove osd `ceph osd rm osd.3`
133 stage('Remove OSDs') {
134 for (i in osd_ids) {
135 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + i)
136 }
137 }
138
139 for (osd_id in osd_ids) {
140
141 id = osd_id.replaceAll('osd.', '')
142 def dmcrypt = ""
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100143 try {
Jakub Josefa63f9862018-01-11 17:58:38 +0100144 dmcrypt = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep dmcrypt")['return'][0].values()[0]
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100145 } catch (Exception e) {
Jakub Josefa63f9862018-01-11 17:58:38 +0100146 common.warningMsg(e)
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100147 }
148
Jakub Josefa63f9862018-01-11 17:58:38 +0100149 if (dmcrypt?.trim()) {
150 mount = runCephCommand(pepperEnv, HOST, "lsblk -rp | grep /var/lib/ceph/osd/ceph-${id} -B1")['return'][0].values()[0]
151 dev = mount.split()[0].replaceAll("[0-9]","")
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100152
Jakub Josefa63f9862018-01-11 17:58:38 +0100153 // remove partition tables
154 stage("dd part table on ${dev}") {
155 runCephCommand(pepperEnv, HOST, "dd if=/dev/zero of=${dev} bs=512 count=1 conv=notrunc")
156 }
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100157
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100158 }
Jakub Josefa63f9862018-01-11 17:58:38 +0100159 // remove journal, block_db, block_wal partition `parted /dev/sdj rm 3`
160 stage('Remove journal / block_db / block_wal partition') {
161 def partition_uuid = ""
162 def journal_partition_uuid = ""
163 def block_db_partition_uuid = ""
164 def block_wal_partition_uuid = ""
165 try {
166 journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
167 journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
168 } catch (Exception e) {
169 common.infoMsg(e)
170 }
171 try {
172 block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
173 block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
174 } catch (Exception e) {
175 common.infoMsg(e)
176 }
177
178 try {
179 block_wal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.wal' | grep partuuid")
180 block_wal_partition_uuid = block_wal_partition_uuid.toString().trim().split("\n")[0].substring(block_wal_partition_uuid.toString().trim().lastIndexOf("/")+1)
181 } catch (Exception e) {
182 common.infoMsg(e)
183 }
184
185 // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
186 if (journal_partition_uuid?.trim()) {
187 partition_uuid = journal_partition_uuid
188 } else if (block_db_partition_uuid?.trim()) {
189 partition_uuid = block_db_partition_uuid
190 }
191
192 // if disk has journal, block_db or block_wal on different disk, then remove the partition
193 if (partition_uuid?.trim()) {
194 removePartition(pepperEnv, HOST, partition_uuid)
195 }
196 if (block_wal_partition_uuid?.trim()) {
197 removePartition(pepperEnv, HOST, block_wal_partition_uuid)
198 }
Jiri Broulikeb7b82f2017-11-30 13:55:40 +0100199 }
200 }
Jakub Josefa63f9862018-01-11 17:58:38 +0100201 // remove cluster flags
202 if (flags.size() > 0) {
203 stage('Unset cluster flags') {
204 for (flag in flags) {
205 common.infoMsg('Removing flag ' + flag)
206 runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
207 }
Tomáš Kukrálf72096d2017-08-11 12:58:03 +0200208 }
209 }
210 }
Tomáš Kukrálf72096d2017-08-11 12:58:03 +0200211}