blob: 1e62bb26474a4f28c8d8bb98076432233ecafff9 [file] [log] [blame]
Tomáš Kukrálf72096d2017-08-11 12:58:03 +02001/**
2 *
3 * Remove OSD from existing cluster
4 *
 5 *  Required parameters:
6 * SALT_MASTER_URL URL of Salt master
7 * SALT_MASTER_CREDENTIALS Credentials to the Salt API
8 *
 9 *  HOST                              Host (minion id) to be removed
10 *  ADMIN_HOST                        Host (minion id) with admin keyring
11 *  OSD                               Comma separated list of OSD ids to be removed
12 *  CLUSTER_FLAGS                     Comma separated list of tags to apply to cluster
13 *  WAIT_FOR_HEALTHY                  Wait for cluster rebalance before stopping daemons
13 *
14 */
15
// shared pipeline helpers from the mirantis mk library
common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()

// configure global variables
def saltMaster                           // Salt API connection, initialized inside node {}
def flags = CLUSTER_FLAGS.tokenize(',')  // cluster flags to set before / unset after removal; tokenize drops empty tokens
def osds = OSD.tokenize(',')             // OSD ids requested for removal (String tokens, e.g. ['1', '2'])
Tomáš Kukrálf72096d2017-08-11 12:58:03 +020023
/**
 * Execute a ceph CLI command on the admin-keyring host.
 *
 * @param master Salt connection (as returned by salt.connection)
 * @param cmd    full ceph command line to run
 * @return raw salt.cmdRun result structure
 */
def runCephCommand(master, cmd) {
    def result = salt.cmdRun(master, ADMIN_HOST, cmd)
    return result
}
27
node("python") {

    // create connection to salt master
    saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

    // set requested cluster flags (e.g. noout) before touching OSDs
    if (flags.size() > 0) {
        stage('Set cluster flags') {
            for (flag in flags) {
                runCephCommand(saltMaster, 'ceph osd set ' + flag)
            }
        }
    }

    // get list of disks configured on the target osd host
    def pillar_disks = salt.getPillar(saltMaster, HOST, 'ceph:osd:disk')['return'][0].values()[0]
    def hostname = salt.getPillar(saltMaster, HOST, 'linux:system:name')['return'][0].values()[0]
    // numeric host id derived from the hostname; presumably hostnames look
    // like 'osdNN' so stripping 'osd' leaves the numeric part -- TODO confirm
    def hostname_id = hostname.replaceAll('osd', '')
    def osd_ids = []

    // OSD tokens arrive as Strings; normalize to integers so the membership
    // test below compares like with like. The previous check tested an
    // Integer against a List<String>, which was always false, so no OSD
    // was ever selected for removal.
    def target_ids = osds.collect { it.trim().toInteger() }

    for (i in pillar_disks.keySet()) {
        def osd_id = (hostname_id + i).toInteger()
        if (osd_id in target_ids) {
            osd_ids.add('osd.' + osd_id)
        }
    }

    // `ceph osd out <id> <id>` - mark OSDs out so data rebalances away from them
    stage('Set OSDs out') {
        runCephCommand(saltMaster, 'ceph osd out ' + osd_ids.join(' '))
    }

    // wait for healthy cluster before stopping the daemons
    if (common.validInputParam('WAIT_FOR_HEALTHY') && WAIT_FOR_HEALTHY.toBoolean()) {
        stage('Waiting for healthy cluster') {
            while (true) {
                def health = runCephCommand(saltMaster, 'ceph health')['return'][0].values()[0]
                // `ceph health` reports status as HEALTH_OK (with underscore);
                // the previous 'HEALTH OK' check never matched and looped forever
                if (health.contains('HEALTH_OK')) {
                    common.infoMsg('Cluster is healthy')
                    break
                }
                sleep(60)
            }
        }
    }

    // stop osd daemons on the target host
    stage('Stop OSD daemons') {
        for (i in osd_ids) {
            // literal replace (not regex replaceAll): '.' in the pattern would
            // otherwise match any character
            salt.runSaltProcessStep(saltMaster, HOST, 'service.stop', ['ceph-osd@' + i.replace('osd.', '')], null, true)
        }
    }

    // `ceph osd crush remove osd.2` - drop the OSDs from the CRUSH map
    stage('Remove OSDs from CRUSH') {
        for (i in osd_ids) {
            runCephCommand(saltMaster, 'ceph osd crush remove ' + i)
        }
    }

    // remove keyring `ceph auth del osd.3`
    stage('Remove OSD keyrings from auth') {
        for (i in osd_ids) {
            runCephCommand(saltMaster, 'ceph auth del ' + i)
        }
    }

    // remove osd `ceph osd rm osd.3`
    stage('Remove OSDs') {
        for (i in osd_ids) {
            runCephCommand(saltMaster, 'ceph osd rm ' + i)
        }
    }

    // finally clear the cluster flags set at the start
    if (flags.size() > 0) {
        stage('Unset cluster flags') {
            for (flag in flags) {
                common.infoMsg('Removing flag ' + flag)
                runCephCommand(saltMaster, 'ceph osd unset ' + flag)
            }
        }
    }

}