/**
 *
 * Performs MCP component package updates
 *
 * Expected parameters:
 *   SALT_MASTER_URL          URL of the Salt API
 *   SALT_MASTER_CREDENTIALS  Credentials for the Salt API
 *   TARGET_SERVERS           (Optional) String containing a list of Salt targets split by comma.
 *                            NOTE: If this parameter is set, it will be used to run package
 *                            updates on the specified targets. If it isn't set, targets will
 *                            be detected automatically.
 *   COMPONENTS               String containing a comma-separated list of components to update.
 *                            Currently only nova and ceph are supported.
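 *
 * Example (illustrative values only):
 *   TARGET_SERVERS = 'cmp*'
 *   COMPONENTS     = 'nova,ceph'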
 *
 */

common = new com.mirantis.mk.Common()
def python = new com.mirantis.mk.Python()
salt = new com.mirantis.mk.Salt()
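// NOTE: 'common' and 'salt' (and 'pepperEnv' below) are intentionally declared
// without 'def' so they are bound to the script scope and remain visible inside
// the helper functions defined in this file.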

pepperEnv = "pepperEnv"

/**
 * Execute a shell command using Salt
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Minion to execute on
 * @param cmd        Command to execute
 * @return String with the shell output
 */
def runShCommand(saltMaster, target, cmd) {
    return salt.cmdRun(saltMaster, target, cmd)
}

/**
 * Installs package updates by running apt install with the
 * --only-upgrade flag on the target
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Minion to execute on
 * @param pkgs       List of packages to update, e.g. nova*, salt*, ceph*
 */
def installPkgUpdate(saltMaster, target, pkgs) {
    common.infoMsg("Installing ${pkgs} updates on ${target}")
    runShCommand(saltMaster, target, "apt install --only-upgrade ${pkgs.join(' ')} -y")
}

/**
 * Returns a string with values from pillar
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Minion to execute on
 * @param pillar     Pillar path to get values from (e.g. nova:controller)
 * @return String with the pillar values
 */
def getPillarValues(saltMaster, target, pillar) {
    return salt.getReturnValues(salt.getPillar(saltMaster, target, pillar))
}

/**
 * Returns a pillar value converted to boolean
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Minion to execute on
 * @param pillar     Pillar path to get values from (e.g. nova:controller:enabled)
 * @return Boolean parsed from the pillar output string
 */
def getPillarBoolValues(saltMaster, target, pillar) {
    return getPillarValues(saltMaster, target, pillar).toBoolean()
}

/**
 * Returns the first minion from the list of minions
 * sorted in alphabetical order
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Criteria by which to choose minions
 * @return String with the minion id
 */
def getFirstMinion(saltMaster, target) {
    def minionsSorted = salt.getMinionsSorted(saltMaster, target)
    return minionsSorted[0]
}

/**
 * Stops a list of services one by one on the target
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Criteria by which to choose minions
 * @param services   List of services to stop
 */
def stopServices(saltMaster, target, services) {
    common.infoMsg("Stopping ${services} on ${target}")
    for (s in services) {
        runShCommand(saltMaster, target, "systemctl stop ${s}")
    }
}

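/**
 * Waits until the Ceph cluster reports HEALTH_OK, polling 'ceph health' on a
 * monitor node every 10 seconds. NOTE: if the cluster does not become healthy
 * within the given number of attempts, the function returns without raising.
 *
 * @param saltMaster Object pointing to salt master
 * @param count      Starting attempt counter (normally 0)
 * @param attempts   Maximum number of polling attempts
 */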
def waitForHealthy(saltMaster, count=0, attempts=100) {
    // wait for healthy cluster
    while (count < attempts) {
        def health = runShCommand(saltMaster, "I@ceph:mon and I@ceph:common:keyring:admin", 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            break
        }
        count++
        sleep(10)
    }
}

/**
 * Returns the nova service status as a list of maps, e.g.
 * [
 *   {
 *     "Status": "enabled",
 *     "Binary": "nova-conductor",
 *     "Zone": "internal",
 *     "State": "up",
 *     "Host": "ctl01",
 *     "Updated At": "2019-03-22T17:39:02.000000",
 *     "ID": 7
 *   }
 * ]
 *
 * @param saltMaster Object pointing to salt master
 * @param target     Minion on which to run the openstack client command
 * @param host       Host for which to get the service status, e.g. cmp1
 * @param service    Service name to check, e.g. nova-compute
 * @return List of maps with service status data
 */
def getServiceStatus(saltMaster, target, host, service) {
    def cmd = ". /root/keystonercv3; openstack compute service list --host ${host} --service ${service} -f json"
    def res
    common.retry(3, 10) {
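        // cmdRun output is prefixed with 'Salt command execution success';
        // strip that banner before parsing the remainder as JSON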
        res = readJSON text: salt.cmdRun(saltMaster, target, cmd)['return'][0].values()[0].replaceAll('Salt command execution success', '')
    }
    return res
}

/**
 * Waits until services are back in the 'up' state in the Nova API output;
 * if the state doesn't change to 'up', raises an error
 *
 * @param saltMaster   Object pointing to salt master
 * @param target       Criteria by which to choose hosts where to check service states
 * @param clientTarget Criteria by which to choose the minion where to run openstack commands
 * @param binaries     List of services to wait for
 * @param retries      Number of tries to get the service status
 * @param timeout      Number of seconds to wait between tries
 */
def waitForServices(saltMaster, target, clientTarget, binaries, retries=18, timeout=10) {
    for (host in salt.getMinionsSorted(saltMaster, target)) {
        for (b in binaries) {
            common.retry(retries, timeout) {
                def status = getServiceStatus(saltMaster, clientTarget, host.tokenize('.')[0], b)[0]
                if (status['State'] == 'up') {
                    common.infoMsg("Service ${b} on host ${host} is UP and Running")
                } else {
                    error("Service ${b} status check failed or service isn't running on host ${host}")
                }
            }
        }
    }
}

node() {
    try {
        stage('Setup virtualenv for Pepper') {
            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
        }

        def components = COMPONENTS.tokenize(',')

        if ('ceph' in components) {
            def monPillar = 'ceph:mon:enabled'
            def commonPillar = 'ceph:common:enabled'
            def osdPillar = 'ceph:osd:enabled'
            def rgwPillar = 'ceph:radosgw:enabled'

            def monTarget = "I@${monPillar}:true"
            def commonTarget = "I@${commonPillar}:true"
            def osdTarget = "I@${osdPillar}:true"
            def rgwTarget = "I@${rgwPillar}:true"
            def targets = TARGET_SERVERS.tokenize(',')

            // If TARGET_SERVERS is empty, detect targets automatically
            if (!targets) {
                targets = salt.getMinionsSorted(pepperEnv, commonTarget) + salt.getMinionsSorted(pepperEnv, monTarget) + salt.getMinionsSorted(pepperEnv, rgwTarget) + salt.getMinionsSorted(pepperEnv, osdTarget)
            }
            // Ceph common and other roles can be combined, so make the host list elements unique
            targets = targets.toSet()

            stage('Update Ceph configuration using new defaults') {
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, commonPillar)) {
                        salt.enforceState(pepperEnv, t, 'ceph.common', true)
                    }
                }
            }

            stage('Restart Ceph services') {
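                // Restart order: ceph-mon/ceph-mgr first, then radosgw, then OSDs,
                // waiting for the cluster to report HEALTH_OK after each restart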
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, monPillar)) {
                        def monitors = salt.getMinions(pepperEnv, t)
                        for (tgt in monitors) {
                            runShCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
                            runShCommand(pepperEnv, tgt, "systemctl restart ceph-mgr.target")
                            waitForHealthy(pepperEnv)
                        }
                    }
                }
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, rgwPillar)) {
                        runShCommand(pepperEnv, t, "systemctl restart ceph-radosgw.target")
                        waitForHealthy(pepperEnv)
                    }
                }
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, osdPillar)) {
                        def nodes = salt.getMinions(pepperEnv, t)
                        for (tgt in nodes) {
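                            // Refresh grains so the 'ceph' grain reflects the current OSD layout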
                            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
                            def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']

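                            // Collect OSD ids (e.g. 'osd.12') from the ceph_disk grain mapping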
                            def osd_ids = []
                            for (i in ceph_disks) {
                                def osd_id = i.getKey().toString()
                                osd_ids.add('osd.' + osd_id)
                            }

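                            // Restart OSDs one by one; 'noout' prevents data rebalancing
                            // while an OSD is briefly down during its restart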
                            for (i in osd_ids) {
                                runShCommand(pepperEnv, tgt, 'ceph osd set noout')
                                salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
                                sleep(60)
                                runShCommand(pepperEnv, tgt, 'ceph osd unset noout')
                                // wait for healthy cluster
                                waitForHealthy(pepperEnv)
                            }
                        }
                    }
                }
            }
        }

        if ('nova' in components) {
            def ctlPillar = 'nova:controller:enabled'
            def cmpPillar = 'nova:compute:enabled'

            def cmpTarget = "I@${cmpPillar}:true"
            def ctlTarget = "I@${ctlPillar}:true"
            // Target for calling the openstack client, containing keystonercv3
            def clientTarget = getFirstMinion(pepperEnv, 'I@keystone:client:enabled:true')
            def targets = TARGET_SERVERS.tokenize(',')
            // If TARGET_SERVERS is empty, detect targets automatically
            if (!targets) {
                targets = salt.getMinionsSorted(pepperEnv, ctlTarget) + salt.getMinionsSorted(pepperEnv, cmpTarget)
            }

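            // For each target, build the service and binary lists that match
            // the roles detected from the pillar data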
            for (t in targets) {
                if (getPillarBoolValues(pepperEnv, t, ctlPillar) || getPillarBoolValues(pepperEnv, t, cmpPillar)) {
                    def tservices = ['nova*']
                    def tbinaries = []
                    if (getPillarBoolValues(pepperEnv, t, ctlPillar)) {
                        tservices += ['apache2']
                        tbinaries += ['nova-consoleauth', 'nova-scheduler', 'nova-conductor']
                    }
                    if (getPillarBoolValues(pepperEnv, t, cmpPillar)) {
                        tbinaries += ['nova-compute']
                    }
                    // Stop component services to ensure that the updated code is running
                    stopServices(pepperEnv, t, tservices)
                    // Update all installed nova packages
                    installPkgUpdate(pepperEnv, t, ['nova*', 'python-nova*'])
                    common.infoMsg("Applying component states on ${t}")
                    salt.enforceState(pepperEnv, t, 'nova')
                    waitForServices(pepperEnv, t, clientTarget, tbinaries)
                } else {
                    // If no compute or controller pillar is detected, only the packages will be updated
                    installPkgUpdate(pepperEnv, t, ['nova*', 'python-nova*'])
                }
            }
        }
    } catch (Throwable e) {
        // If an error or exception was thrown, mark the build as failed
        currentBuild.result = "FAILURE"
        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
    }
}