/**
 *
 * Performs MCP component package updates
 *
 * Expected parameters:
 *   SALT_MASTER_URL                 URL of the Salt API
 *   SALT_MASTER_CREDENTIALS         Credentials for the Salt API
 *   TARGET_SERVERS                  (Optional) String containing a comma-separated list of Salt targets.
 *                                   NOTE: If this parameter is set, it will be used to run package updates on the specified targets.
 *                                   If it isn't set, targets will be detected automatically.
 *   COMPONENTS                      String containing a comma-separated list of components to update.
 *                                   Currently only nova and ceph are supported.
 *
 */
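// Example parameter values (illustrative only; the URL and credentials ID below are placeholders):
//   SALT_MASTER_URL         = 'http://salt-master.example.local:6969'
//   SALT_MASTER_CREDENTIALS = 'salt-api-credentials'
//   TARGET_SERVERS          = 'ctl*,cmp*'
//   COMPONENTS              = 'nova,ceph'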

common = new com.mirantis.mk.Common()
def python = new com.mirantis.mk.Python()
salt = new com.mirantis.mk.Salt()

pepperEnv = "pepperEnv"

/**
 * Execute shell command using salt
 *
 * @param saltMaster Object pointing to salt master
 * @param target Minion to execute on
 * @param cmd Command to execute
 * @return string with shell output
 */

def runShCommand(saltMaster, target, cmd) {
    return salt.cmdRun(saltMaster, target, cmd)
}

/**
 * Installs package updates by running apt install with
 * the --only-upgrade flag on the target
 *
 * @param saltMaster Object pointing to salt master
 * @param target Minion to execute on
 * @param pkgs List of packages to update, e.g. nova*, salt*, ceph*
 */

def installPkgUpdate(saltMaster, target, pkgs) {
    common.infoMsg("Updating apt cache on ${target}")
    runShCommand(saltMaster, target, 'apt update')
    common.infoMsg("Installing ${pkgs} updates on ${target}")
    runShCommand(saltMaster, target, "apt install --only-upgrade ${pkgs.join(' ')} -y")
}
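// Usage sketch (illustrative): update all nova-related packages on compute nodes, e.g.
//   installPkgUpdate(pepperEnv, 'cmp*', ['nova*', 'python-nova*'])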

/**
 * Returns string with values from pillar
 *
 * @param saltMaster Object pointing to salt master
 * @param target Minion to execute on
 * @param pillar Pillar path to get values (nova:controller)
 * @return string with Pillar values
 */
def getPillarValues(saltMaster, target, pillar) {
    return salt.getReturnValues(salt.getPillar(saltMaster, target, pillar))
}

/**
 * Returns pillar value converted to boolean
 *
 * @param saltMaster Object pointing to salt master
 * @param target Minion to execute on
 * @param pillar Pillar path to get values (nova:controller:enabled)
 * @return Boolean parsed from the pillar output string
 */

def getPillarBoolValues(saltMaster, target, pillar) {
    return getPillarValues(saltMaster, target, pillar).toBoolean()
}
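// Usage sketch (illustrative): check whether a minion has the nova controller role enabled, e.g.
//   if (getPillarBoolValues(pepperEnv, 'ctl01*', 'nova:controller:enabled')) { ... }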

/**
 * Returns the first minion from the list of minions
 * sorted in alphabetical order
 *
 * @param saltMaster Object pointing to salt master
 * @param target Criteria by which to choose minions
 * @return string with minion id
 */

def getFirstMinion(saltMaster, target) {
    def minionsSorted = salt.getMinionsSorted(saltMaster, target)
    return minionsSorted[0]
}
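// Usage sketch (illustrative): pick a single minion that has the keystone client installed, e.g.
//   def clientTarget = getFirstMinion(pepperEnv, 'I@keystone:client:enabled:true')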

/**
 * Stops a list of services one by one on the target
 *
 * @param saltMaster Object pointing to salt master
 * @param target Criteria by which to choose minions
 * @param services List of services to stop
 *
 */

def stopServices(saltMaster, target, services) {
    common.infoMsg("Stopping ${services} on ${target}")
    for (s in services) {
        runShCommand(saltMaster, target, "systemctl stop ${s}")
    }
}
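// Usage sketch (illustrative): stop nova services and apache2 on a controller node, e.g.
//   stopServices(pepperEnv, 'ctl01*', ['nova*', 'apache2'])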

/**
 * Waits for the Ceph cluster to report HEALTH_OK by polling 'ceph health'
 * on the Ceph mon nodes every 10 seconds, up to the given number of attempts
 *
 * @param saltMaster Object pointing to salt master
 * @param count Attempt counter start value
 * @param attempts Maximum number of attempts
 */
def waitForHealthy(saltMaster, count=0, attempts=100) {
    // wait for healthy cluster
    while (count < attempts) {
        def health = runShCommand(saltMaster, "I@ceph:mon and I@ceph:common:keyring:admin", 'ceph health')['return'][0].values()[0]
        if (health.contains('HEALTH_OK')) {
            common.infoMsg('Cluster is healthy')
            break;
        }
        count++
        sleep(10)
    }
}

/**
 * Returns nova service status as a list of hashes, e.g.
 * [
 *   {
 *     "Status": "enabled",
 *     "Binary": "nova-conductor",
 *     "Zone": "internal",
 *     "State": "up",
 *     "Host": "ctl01",
 *     "Updated At": "2019-03-22T17:39:02.000000",
 *     "ID": 7
 *   }
 * ]
 *
 *
 * @param saltMaster Object pointing to salt master
 * @param target Minion on which to run the openstack client command
 * @param host Host for which to get service status, e.g. cmp1
 * @param service Service name to check, e.g. nova-compute
 * @return List of hashes with service status data
 *
 */

def getServiceStatus(saltMaster, target, host, service) {
    def cmd = ". /root/keystonercv3; openstack compute service list --host ${host} --service ${service} -f json"
    def res
    common.retry(3, 10) {
        res = readJSON text: salt.cmdRun(saltMaster, target, cmd)['return'][0].values()[0].replaceAll('Salt command execution success', '')
    }
    return res
}
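// Usage sketch (illustrative): check nova-compute state on host cmp1, e.g.
//   def status = getServiceStatus(pepperEnv, clientTarget, 'cmp1', 'nova-compute')[0]
//   // status['State'] is expected to be 'up' when the service is healthy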

/**
 * Waits until services are back in the 'up' state in the Nova API output;
 * if the state doesn't change to 'up', raises an error
 *
 * @param saltMaster Object pointing to salt master
 * @param target Criteria by which to choose hosts where to check services states
 * @param clientTarget Criteria by which to choose minion where to run openstack commands
 * @param binaries List of service binaries to wait for
 * @param retries Number of tries to get service status
 * @param timeout Number of seconds to wait between tries
 *
 */

def waitForServices(saltMaster, target, clientTarget, binaries, retries=18, timeout=10) {
    for (host in salt.getMinionsSorted(saltMaster, target)) {
        for (b in binaries) {
            common.retry(retries, timeout) {
                def status = getServiceStatus(saltMaster, clientTarget, host.tokenize('.')[0], b)[0]
                if (status['State'] == 'up') {
                    common.infoMsg("Service ${b} on host ${host} is UP and Running")
                } else {
                    error("Service ${b} status check failed or service isn't running on host ${host}")
                }
            }
        }
    }
}
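// Usage sketch (illustrative): wait until nova-compute is reported 'up' on all compute hosts, e.g.
//   waitForServices(pepperEnv, 'I@nova:compute:enabled:true', clientTarget, ['nova-compute'])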

node() {
    try {
        stage('Setup virtualenv for Pepper') {
            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
        }

        def components = COMPONENTS.tokenize(',')

        if ('ceph' in components) {
            def monPillar = 'ceph:mon:enabled'
            def commonPillar = 'ceph:common:enabled'
            def osdPillar = 'ceph:osd:enabled'
            def rgwPillar = 'ceph:radosgw:enabled'

            def monTarget = "I@${monPillar}:true"
            def commonTarget = "I@${commonPillar}:true"
            def osdTarget = "I@${osdPillar}:true"
            def rgwTarget = "I@${rgwPillar}:true"
            def targets = TARGET_SERVERS.tokenize(',')

            // If TARGET_SERVERS is empty
            if (!targets) {
                targets = salt.getMinionsSorted(pepperEnv, commonTarget) + salt.getMinionsSorted(pepperEnv, monTarget) + salt.getMinionsSorted(pepperEnv, rgwTarget) + salt.getMinionsSorted(pepperEnv, osdTarget)
            }
            // Ceph common and other roles can be combined, so make the host list elements unique
            targets = targets.toSet()

            stage('Update Ceph configuration using new defaults') {
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, commonPillar)) {
                        salt.enforceState(pepperEnv, t, 'ceph.common', true)
                    }
                }
            }

            stage('Restart Ceph services') {
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, monPillar)) {
                        def monitors = salt.getMinions(pepperEnv, t)
                        for (tgt in monitors) {
                            runShCommand(pepperEnv, tgt, "systemctl restart ceph-mon.target")
                            runShCommand(pepperEnv, tgt, "systemctl restart ceph-mgr.target")
                            waitForHealthy(pepperEnv)
                        }
                    }
                }
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, rgwPillar)) {
                        runShCommand(pepperEnv, t, "systemctl restart ceph-radosgw.target")
                        waitForHealthy(pepperEnv)
                    }
                }
                for (t in targets) {
                    if (getPillarBoolValues(pepperEnv, t, osdPillar)) {
                        def nodes = salt.getMinions(pepperEnv, t)
                        for (tgt in nodes) {
                            salt.runSaltProcessStep(pepperEnv, tgt, 'saltutil.sync_grains', [], null, true, 5)
                            def ceph_disks = salt.getGrain(pepperEnv, tgt, 'ceph')['return'][0].values()[0].values()[0]['ceph_disk']

                            def osd_ids = []
                            for (i in ceph_disks) {
                                def osd_id = i.getKey().toString()
                                osd_ids.add('osd.' + osd_id)
                            }

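                            // Setting the 'noout' flag keeps restarted OSDs from being marked out
                            // of the cluster, which would otherwise trigger data rebalancing while
                            // each OSD daemon is restarted below.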
                            for (i in osd_ids) {
                                runShCommand(pepperEnv, tgt, 'ceph osd set noout')
                                salt.runSaltProcessStep(pepperEnv, tgt, 'service.restart', ['ceph-osd@' + i.replaceAll('osd.', '')], null, true)
                                sleep(60)
                                runShCommand(pepperEnv, tgt, 'ceph osd unset noout')
                                // wait for healthy cluster
                                waitForHealthy(pepperEnv)
                            }
                        }
                    }
                }
            }
        }

        if ('nova' in components) {
            def ctlPillar = 'nova:controller:enabled'
            def cmpPillar = 'nova:compute:enabled'

            def cmpTarget = "I@${cmpPillar}:true"
            def ctlTarget = "I@${ctlPillar}:true"
            // Target for calling the openstack client, containing keystonercv3
            def clientTarget = getFirstMinion(pepperEnv, 'I@keystone:client:enabled:true')
            def targets = TARGET_SERVERS.tokenize(',')
            // If TARGET_SERVERS is empty
            if (!targets) {
                targets = salt.getMinionsSorted(pepperEnv, ctlTarget) + salt.getMinionsSorted(pepperEnv, cmpTarget)
            }

            for (t in targets) {
                if (getPillarBoolValues(pepperEnv, t, ctlPillar) || getPillarBoolValues(pepperEnv, t, cmpPillar)) {
                    def tservices = ['nova*']
                    def tbinaries = []
                    if (getPillarBoolValues(pepperEnv, t, ctlPillar)) {
                        tservices += ['apache2']
                        tbinaries += ['nova-consoleauth', 'nova-scheduler', 'nova-conductor']
                    }
                    if (getPillarBoolValues(pepperEnv, t, cmpPillar)) {
                        tbinaries += ['nova-compute']
                    }
                    // Stop component services to ensure that updated code is running
                    stopServices(pepperEnv, t, tservices)
                    // Update all installed nova packages
                    installPkgUpdate(pepperEnv, t, ['nova*', 'python-nova*'])
                    common.infoMsg("Applying component states on ${t}")
                    salt.enforceState(pepperEnv, t, 'nova')
                    waitForServices(pepperEnv, t, clientTarget, tbinaries)
                } else {
                    // If no compute or controller pillar is detected, only the packages will be updated
                    installPkgUpdate(pepperEnv, t, ['nova*', 'python-nova*'])
                }
            }
        }
    } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
        currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
        throw e
    }
}