/**
 *
 * Filestore to Bluestore or vice versa backend migration
 *
 * Required parameters:
 *  SALT_MASTER_URL             URL of Salt master
 *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
 *
 *  ADMIN_HOST                  Host (minion id) with admin keyring and /etc/crushmap file present
 *  OSD                         OSD ids to be migrated if single OSD host is targeted (comma-separated list - 1,2,3)
 *  TARGET                      Hosts (minion ids) to be targeted
 *  CLUSTER_FLAGS               Comma separated list of flags to apply to cluster
 *  WAIT_FOR_HEALTHY            Wait for cluster rebalance before stopping daemons
 *  ORIGIN_BACKEND              Ceph backend before upgrade (filestore or bluestore)
 *
 */
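/**
 * Illustrative run parameters (example values only, not defaults of this pipeline; adjust to the environment):
 *  TARGET           = 'osd*'                // Salt compound/glob expression matching the OSD hosts
 *  OSD              = '1,2,3'               // or '*' to migrate every OSD on the targeted hosts
 *  CLUSTER_FLAGS    = 'noout,norebalance'   // any valid `ceph osd set` flags
 *  WAIT_FOR_HEALTHY = 'true'
 *  ORIGIN_BACKEND   = 'filestore'           // only OSDs currently on this backend are touched
 */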

common = new com.mirantis.mk.Common()
salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()

MIGRATION_METHOD = "per-osd"
// TBD: per-host

def pepperEnv = "pepperEnv"
def flags = CLUSTER_FLAGS.tokenize(',')
def osds = OSD.tokenize(',')

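// helper: run a shell command on the given minion through the Salt API (salt.cmdRun)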
def runCephCommand(master, target, cmd) {
    return salt.cmdRun(master, target, cmd)
}

node("python") {

    // create connection to salt master
    python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)

    if (MIGRATION_METHOD == 'per-osd') {

        if (flags.size() > 0) {
            stage('Set cluster flags') {
                for (flag in flags) {
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd set ' + flag)
                }
            }
        }

        def target_hosts = salt.getMinions(pepperEnv, TARGET)

        for (HOST in target_hosts) {
            def osd_ids = []

            // get list of osd disks of the host
            def ceph_disks = salt.getGrain(pepperEnv, HOST, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']
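            // ceph_disks is expected to be a map keyed by OSD id taken from the minion's ceph:ceph_disk grain,
            // e.g. ['1': [...], '2': [...]] (an assumption based on how the keys are used below)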

            for (i in ceph_disks) {
                def osd_id = i.getKey().toString()
                if (osd_id in osds || OSD == '*') {
                    osd_ids.add('osd.' + osd_id)
                    print("Will migrate " + osd_id)
                } else {
                    print("Skipping " + osd_id)
                }
            }

            for (osd_id in osd_ids) {

                def id = osd_id.replaceAll('osd.', '')
                def backend = runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd metadata ${id} | grep osd_objectstore")['return'][0].values()[0]
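                // `ceph osd metadata <id>` reports the object store of that OSD, so the grepped line
                // should look like: "osd_objectstore": "filestore" (or "bluestore")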

                if (backend.contains(ORIGIN_BACKEND)) {

                    // wait for healthy cluster before manipulating OSDs
                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
                        stage('Waiting for healthy cluster') {
                            while (true) {
                                def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
                                if (health.contains('HEALTH_OK')) {
                                    common.infoMsg('Cluster is healthy')
                                    break;
                                }
                                sleep(5)
                            }
                        }
                    }

                    // `ceph osd out <id> <id>`
                    stage('Set OSDs out') {
                        runCephCommand(pepperEnv, ADMIN_HOST, "ceph osd out ${osd_id}")
                    }

                    // wait for healthy cluster
                    if (WAIT_FOR_HEALTHY.toBoolean() == true) {
                        stage('Waiting for healthy cluster') {
                            sleep(5)
                            while (true) {
                                def health = runCephCommand(pepperEnv, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
                                if (health.contains('HEALTH_OK')) {
                                    common.infoMsg('Cluster is healthy')
                                    break;
                                }
                                sleep(10)
                            }
                        }
                    }

                    // stop osd daemons
                    stage('Stop OSD daemons') {
                        salt.runSaltProcessStep(pepperEnv, HOST, 'service.stop', ['ceph-osd@' + osd_id.replaceAll('osd.', '')], null, true)
                    }

                    // remove keyring `ceph auth del osd.3`
                    stage('Remove OSD keyrings from auth') {
                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph auth del ' + osd_id)
                    }

                    // remove osd `ceph osd rm osd.3`
                    stage('Remove OSDs') {
                        runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd rm ' + osd_id)
                    }

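                    // Derive the data device from the OSD mount point; with `mount` output such as
                    // "/dev/sdi1 on /var/lib/ceph/osd/ceph-1 ..." (illustrative), stripping the digits yields dev = /dev/sdi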
                    def mount = runCephCommand(pepperEnv, HOST, "mount | grep /var/lib/ceph/osd/ceph-${id}")['return'][0].values()[0]
                    dev = mount.split()[0].replaceAll("[0-9]","")

                    // remove journal or block_db partition `parted /dev/sdj rm 3`
                    stage('Remove journal / block_db partition') {
                        def partition_uuid = ""
                        def journal_partition_uuid = ""
                        def block_db_partition_uuid = ""
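                        // The filestore journal / bluestore block.db live as symlinks in the OSD directory which,
                        // when created by ceph-disk, point to /dev/disk/by-partuuid/<uuid>; the `ls | grep partuuid`
                        // calls below read that symlink target and keep only the trailing <uuid> component.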
                        try {
                            journal_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep journal | grep partuuid")
                            journal_partition_uuid = journal_partition_uuid.toString().trim().split("\n")[0].substring(journal_partition_uuid.toString().trim().lastIndexOf("/")+1)
                        } catch (Exception e) {
                            common.infoMsg(e)
                        }
                        try {
                            block_db_partition_uuid = runCephCommand(pepperEnv, HOST, "ls -la /var/lib/ceph/osd/ceph-${id}/ | grep 'block.db' | grep partuuid")
                            block_db_partition_uuid = block_db_partition_uuid.toString().trim().split("\n")[0].substring(block_db_partition_uuid.toString().trim().lastIndexOf("/")+1)
                        } catch (Exception e) {
                            common.infoMsg(e)
                        }

                        // set partition_uuid = 2c76f144-f412-481e-b150-4046212ca932
                        if (journal_partition_uuid?.trim()) {
                            partition_uuid = journal_partition_uuid
                        } else if (block_db_partition_uuid?.trim()) {
                            partition_uuid = block_db_partition_uuid
                        }

                        // if the OSD had its block_db or journal on a separate disk, remove that partition
                        if (partition_uuid?.trim()) {
                            def partition = ""
                            try {
                                // partition = /dev/sdi2
                                partition = runCephCommand(pepperEnv, HOST, "blkid | grep ${partition_uuid} ")['return'][0].values()[0].split("(?<=[0-9])")[0]
                            } catch (Exception e) {
                                common.warningMsg(e)
                            }

                            if (partition?.trim()) {
                                // dev = /dev/sdi
                                def dev = partition.replaceAll("[0-9]", "")
                                // part_id = 2
                                def part_id = partition.substring(partition.lastIndexOf("/")+1).replaceAll("[^0-9]", "")
                                runCephCommand(pepperEnv, HOST, "parted ${dev} rm ${part_id}")
                            }
                        }
                    }

                    // umount `umount /dev/sdi1`
                    stage('Umount devices') {
                        runCephCommand(pepperEnv, HOST, "umount /var/lib/ceph/osd/ceph-${id}")
                    }

                    // zap disks `ceph-disk zap /dev/sdi`
                    stage('Zap device') {
                        runCephCommand(pepperEnv, HOST, 'ceph-disk zap ' + dev)
                    }

                    // Redeploy Ceph OSD
                    stage('Deploy Ceph OSD') {
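                        // Refresh pillar first so the minion sees the current Ceph configuration, then
                        // re-apply the ceph.osd state, which is expected to re-create the OSD on the zapped
                        // device with the new objectstore backend defined in the pillar model.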
                        salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_pillar', [], null, true, 5)
                        salt.enforceState(pepperEnv, HOST, 'ceph.osd', true)
                    }
                }
            }
        }
        // remove cluster flags
        if (flags.size() > 0) {
            stage('Unset cluster flags') {
                for (flag in flags) {
                    common.infoMsg('Removing flag ' + flag)
                    runCephCommand(pepperEnv, ADMIN_HOST, 'ceph osd unset ' + flag)
                }
            }
        }
    }
}