import datetime
import hashlib
import json
import logging
import os
import re
import requests
import salt.utils
import socket
import subprocess
import yaml

__author__ = "Dzmitry Stremkouski"
__copyright__ = "Copyright 2019, Mirantis Inc."
__license__ = "Apache 2.0"

logger = logging.getLogger(__name__)
stream = logging.StreamHandler()
logger.addHandler(stream)

try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper

db_ver_map = yaml.load("""
kilo:
  cinder: 41
  glance: 41
  heat: 62
  keystone: 67
  neutron: [ kilo ]
  nova:
    api_db: 2
    db: 280
liberty:
  cinder: 60
  glance: 42
  heat: 65
  keystone: 75
  neutron:
  - 1b4c6e320f79
  - 26c371498592
  - 599c6a226151
  - 45f955889773
  - 1c844d1677f7
  - 52c5312f6baf
  - 9859ac9c136
  - 8675309a5c4f
  - 48153cb5f051
  - 31337ec0ffee
  - 34af2b5c5a59
  - 354db87e3225
  - 11926bcfe72d
  - 5498d17be016
  - 4af11ca47297
  - 2e5352a0ad4d
  - 2a16083502f3
  - 4ffceebfada
  - 30018084ec99
  nova:
    api_db: 3
    db: 302
mitaka:
  cinder: 72
  glance: 44
  heat: 71
  keystone: 97
  neutron:
  - 15be73214821
  - dce3ec7a25c9
  - 659bf3d90664
  - 19f26505c74f
  - 0e66c5227a8a
  - ec7fcfbf72ee
  - 32e5974ada25
  - 3894bccad37f
  - c3a73f615e4
  - 13cfb89f881a
  - 1df244e556f5
  - 2f9e956e7532
  - 15e43b934f81
  - 59cb5b6cf4d
  - b4caf27aae4
  - 31ed664953e6
  - 8a6d8bdae39
  - c6c112992c9
  - 2b4c2465d44b
  - 5ffceebfada
  - 1b294093239c
  - 4ffceebfcdc
  - e3278ee65050
  nova:
    api_db: 7
    db: 319
newton:
  cinder: 79
  glance: 44
  heat: 73
  keystone:
    contract: 1
    data: 4
    db: 109
    expand: 1
  neutron:
  - 030a959ceafa
  - 67daae611b6e
  - a5648cfeeadf
  - a963b38d82f4
  - 6b461a21bcfc
  - 0f5bef0f87d4
  - d3435b514502
  - 5cd92597d11d
  - 3d0e74aa7d37
  - 5abc0278ca73
  - 30107ab6a3ee
  - 45f8dd33480b
  - c415aab1c048
  - 2e0d7a8a1586
  - 5c85685d616d
  - a8b517cff8ab
  - a84ccf28f06a
  - 7d9d8eeec6ad
  - 7bbb25278f53
  - 89ab9a816d70
  - 8fd3918ef6f4
  - c879c5e1ee90
  - b67e765a3524
  - 3b935b28e7a0
  - b12a3ef66e62
  - 4bcd4df1f426
  - 97c25b0d2353
  nova:
    api_db: 22
    db: 334
ocata:
  cinder: 96
  glance: 45
  heat: 79
  keystone:
    contract: 1
    data: 16
    db: 109
    expand: 1
  neutron:
  - a9c43481023c
  - 929c968efe70
  nova:
    api_db: 31
    db: 347
pike:
  cinder: 105
  glance: 45
  heat: 80
  keystone:
    contract: 1
    data: 24
    db: 109
    expand: 1
  neutron:
  - 62c781cb6192
  - 2b42d90729da
  - 7d32f979895f
  - 349b6fd605a6
  - 804a3c76314c
  - c8c222d42aa9
  nova:
    api_db: 45
    db: 362
queens:
  cinder: 117
  glance: 45
  heat: 85
  keystone:
    contract: 1
    data: 44
    db: 109
    expand: 1
  neutron:
  - 594422d373ee
  nova:
    api_db: 52
    db: 378
rocky:
  cinder: 123
  glance: 45
  heat: 86
  keystone:
    contract: 1
    data: 52
    db: 109
    expand: 1
  neutron:
  - 61663558142c
  - 867d39095bf4
  nova:
    api_db: 61
    db: 390
stein:
  cinder: 128
  glance: 45
  heat: 86
  keystone:
    contract: 1
    data: 61
    db: 109
    expand: 1
  neutron:
  - 0ff9e3881597
  - 195176fb410d
  - d72db3e25539
  - fb0167bd9639
  - 9bfad3f1e780
  - cada2437bf41
  nova:
    api_db: 62
    db: 391
train:
  cinder: 132
  glance: 45
  heat: 86
  keystone:
    contract: 1
    data: 71
    db: 109
    expand: 1
  neutron:
  - 63fd95af7dcd
  - c613d0b82681
  nova:
    api_db: 67
    db: 402
""", Loader=Loader)
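# db_ver_map maps an OpenStack release name to the expected database schema
# versions per service: plain migration numbers for cinder/glance/heat,
# contract/data/db/expand counters for newer keystone, a list of alembic
# revision ids for neutron and an api_db/db pair for nova, e.g.
# db_ver_map['queens']['nova']['api_db'] == 52.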

default_vrouter_info_map = yaml.load("""
ContrailConfig:
- deleted
- elements:uuid
- elements:virtual_router_dpdk_enabled
- elements:virtual_router_type
VrouterAgent:
- build_info:build-info:0:build-version
- build_info:build-info:0:build-number
- config_file
- control_ip
- control_node_list_cfg
- dns_server_list_cfg
- dns_servers
- down_interface_count
- eth_name
- headless_mode_cfg
- hostname_cfg
- hypervisor
- mode
- phy_if
- platform
- self_ip_list
- total_interface_count
- tunnel_type
- vhost_cfg
- vhost_if
- vr_limits:max_interfaces
- vr_limits:max_labels
- vr_limits:max_mirror_entries
- vr_limits:max_nexthops
- vr_limits:max_vrfs
- vr_limits:vrouter_max_bridge_entries
- vr_limits:vrouter_max_flow_entries
- vr_limits:vrouter_max_oflow_bridge_entries
- vr_limits:vrouter_max_oflow_entries
- xmpp_peer_list:*:ip
- xmpp_peer_list:*:primary
- xmpp_peer_list:*:status
""", Loader=Loader)

default_peer_filter = ["encoding", "peer_address", "state"]

def _failed_minions(out, agent, failed_minions):

    ''' Verify failed minions '''

    if len(failed_minions) > 0:
        logger.error("%s check FAILED" % agent)
        logger.error("Some minions returned non-zero exit code or empty data")
        logger.error("Failed minions:" + str(failed_minions))
        for minion in failed_minions:
            logger.error(minion)
            logger.debug(str(out[minion]['ret']))
        __context__['retcode'] = 2
        return False

    return True


def _minions_output(out, agent, ignore_dead, ignore_empty=False):

    ''' Verify minions output and exit code '''

    if not out:
        logger.error("%s check FAILED" % agent)
        logger.error("No response from master cmd")
        __context__['retcode'] = 2
        return False

    if not ignore_dead:
        jid = next(iter(out.values()))['jid']
        job_stats = __salt__['saltutil.runner']( 'jobs.print_job', arg=[jid] ) or None
        if not job_stats:
            logger.error("%s check FAILED" % agent)
            logger.error("No response from master runner")
            __context__['retcode'] = 2
            return False

        job_result = job_stats[jid]['Result']
        job_minions = job_stats[jid]['Minions']
        if len(job_minions) != len(job_result):
            logger.error("%s check FAILED" % agent)
            logger.error("Some minions are offline")
            logger.error(list(set(job_minions) - set(job_result.keys())))
            __context__['retcode'] = 2
            return False

    failed_minions = []
    for minion in out:
        if 'retcode' in out[minion]:
            if out[minion]['retcode'] == 0:
                if not ignore_empty:
                    if isinstance(out[minion]['ret'], bool):
                        if minion not in failed_minions:
                            failed_minions.append(minion)
                    elif len(out[minion]['ret']) == 0:
                        if minion not in failed_minions:
                            failed_minions.append(minion)
            else:
                if minion not in failed_minions:
                    failed_minions.append(minion)
        else:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    return True

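# _minions_output() and _failed_minions() implement the shared result handling
# for all checks below: they take the dict returned by __salt__['saltutil.cmd'],
# verify that every targeted minion answered with retcode 0 and non-empty data,
# log the offenders, set __context__['retcode'] = 2 and return False on failure.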


def minions_check(wait_timeout=1, gather_job_wait_timeout=1, target='*', target_type='glob', ignore_dead=False):

    ''' Verify minions are online '''

    agent = "Minions"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='test.ping',
                                    timeout=wait_timeout,
                                    gather_job_timeout=gather_job_wait_timeout
                                  ) or None

    return _minions_output(out, agent, ignore_dead, ignore_empty=True)

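# Illustrative invocation (assumption: this file is distributed to the Salt
# master node as the `health_checks` execution module, the name under which
# the checks below call each other), for example:
#   salt-call health_checks.minions_check target='*' ignore_dead=True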


def time_diff_check(time_diff=1, target='*', target_type='glob', ignore_dead=False, **kwargs):

    ''' Verify time diff on servers '''

    agent = "Time diff"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.time',
                                    arg=['%s'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    minions_times = {}
    env_times = []
    verified_minions = []

    for minion in out:
        verified_minions.append(minion)
        if out[minion]['retcode'] == 0:
            minion_time = int(out[minion]['ret'])
            if str(minion_time) not in minions_times:
                minions_times[str(minion_time)] = []
            minions_times[str(minion_time)].append(minion)
            env_times.append(minion_time)

    env_times.sort()
    diff = env_times[-1] - env_times[0]

    if diff > time_diff:
        __context__['retcode'] = 2
        if kwargs.get("debug", False):
            return False, minions_times
        else:
            return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def contrail_process_list(**kwargs):

    ''' Retrieve contrail process pids and start_time '''

    cmd = ['contrail-status', '-d']

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    procs = {}
    for line in stdout.split('\n'):
        if re.findall('^(\S+).*pid ([0-9]+),.*$', line):
            stat = line.split()
            procs[stat[0]] = int(stat[3][:-1])

    if kwargs.get('role', 'compute') == 'controller':

        for service in ['zookeeper', 'ifmap-server']:
            cmd = ['service', service, 'status']

            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            stdout, stderr = proc.communicate()

            for line in stdout.split('\n'):
                if re.findall('^(\S+).*process ([0-9]+)$', line):
                    stat = line.split()
                    procs[stat[0]] = int(stat[3])

    ctime = int(datetime.datetime.now().strftime("%s"))
    btime_re = re.compile(r"^btime (\d+)$", re.MULTILINE)
    btime_groups = btime_re.search(open("/proc/stat").read())
    btime = int(btime_groups.groups()[0])
    clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
    for proc in procs:
        pid = procs[proc]
        try:
            with open('/proc/%s/stat' % str(pid), 'r') as f:
                stat = f.read()
            jitty_time = int(stat.split(') ')[1].split()[19]) / clk_tck
            proc_uptime = ctime - btime - int(jitty_time)
        except Exception:
            proc_uptime = 0
        procs[proc] = { 'pid': pid, 'uptime': proc_uptime }

    return procs

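# contrail_process_list() returns a dict keyed by process name, e.g.
# {'contrail-vrouter-agent': {'pid': ..., 'uptime': ...}}, where uptime in
# seconds is derived from the starttime field of /proc/<pid>/stat (in clock
# ticks) relative to the boot time read from /proc/stat.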


def contrail_check(target='I@opencontrail:control or I@opencontrail:collector or I@opencontrail:compute', nodetool_target='I@opencontrail:control or I@opencontrail:collector', compute_target='I@opencontrail:compute', target_type='compound', nodetool_target_type='compound', compute_target_type='compound', nodetool_expected_size=3, proc_min_uptime=30, ignore_dead=False, **kwargs):

    ''' Verify contrail infrastructure '''

    use_doctrail = False
    oc_ver = str(__salt__['pillar.get']('_param:opencontrail_version'))
    if len(oc_ver) > 1:
        if oc_ver[0] == '4':
            use_doctrail = True

    agent = "Contrail status"
    if use_doctrail:
        # Compute nodes do not use doctrail yet, but they are part of the compound target.
        # In order to minimize salt calls we are using exception pipes
        arg_cmd = 'test $(whereis -b doctrail | grep -c " ") -eq 0 && contrail-status || doctrail all contrail-status'
    else:
        arg_cmd = "contrail-status"

    # Check #1 contrail-status
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='cmd.run',
                                    arg=[arg_cmd],
                                    timeout=5
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    pattern = '^(==|\*+$|$|\S+\s+(active|backup|inactive\s\(disabled\son\sboot\)))'
    prog = re.compile(pattern)

    validated = []
    for minion in out:
        for line in out[minion]['ret'].split('\n'):
            check_line = True
            if " FOR NODE " in line:
                check_line = False
            if check_line and not prog.match(line) and minion not in failed_minions:
                failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False
    if kwargs.get("debug", False):
        logger.info(validated)

    if use_doctrail:
        arg_cmd = "doctrail all nodetool status"
    else:
        arg_cmd = "nodetool status"

    # Check #2 nodetool
    out = __salt__['saltutil.cmd']( tgt=nodetool_target,
                                    tgt_type=nodetool_target_type,
                                    fun='cmd.run',
                                    arg=[arg_cmd],
                                    timeout=5
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    pattern = '^UN'
    prog = re.compile(pattern)

    validated = []
    for minion in out:
        size = 0
        for line in out[minion]['ret'].split('\n'):
            if prog.match(line):
                size += 1
        if not size == nodetool_expected_size and minion not in failed_minions:
            failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False
    if kwargs.get("debug", False):
        logger.info(validated)

    # Check #3 process status control

    # Contrail 4.x does not produce pid info from contrail-status -d
    # Will skip this check and use another method further
    # TODO: check process list state for oc4 env
    if not use_doctrail:

        out = __salt__['saltutil.cmd']( tgt=nodetool_target,
                                        tgt_type=nodetool_target_type,
                                        fun='health_checks.contrail_process_list',
                                        arg=['role=controller'],
                                        timeout=5
                                      ) or None

        if not _minions_output(out, agent, ignore_dead):
            __context__['retcode'] = 2
            return False

        failed_minions = []
        validated = []
        for minion in out:
            procs = out[minion]['ret']
            for proc in procs:
                proc_uptime = procs[proc]['uptime']
                if proc_uptime < proc_min_uptime:
                    if minion not in failed_minions:
                        failed_minions.append(minion)
                    logger.error({'minion': minion, 'name': proc, 'uptime': proc_uptime})
            validated.append(minion)

        if not _failed_minions(out, agent, failed_minions):
            __context__['retcode'] = 2
            return False
        if kwargs.get("debug", False):
            logger.info(validated)

    # Check #4 process status computes

    # Contrail 4.x does not produce pid info from contrail-status -d
    # Will skip this check and use another method further
    # TODO: check process list state for oc4 env
    if not use_doctrail:

        out = __salt__['saltutil.cmd']( tgt=compute_target,
                                        tgt_type=compute_target_type,
                                        fun='health_checks.contrail_process_list',
                                        timeout=5
                                      ) or None

        if not _minions_output(out, agent, ignore_dead):
            __context__['retcode'] = 2
            return False

        failed_minions = []
        validated = []
        for minion in out:
            procs = out[minion]['ret']
            for proc in procs:
                proc_uptime = procs[proc]['uptime']
                if proc_uptime < proc_min_uptime:
                    if minion not in failed_minions:
                        failed_minions.append(minion)
                    logger.error({'minion': minion, 'name': proc, 'uptime': proc_uptime})
            validated.append(minion)

        if not _failed_minions(out, agent, failed_minions):
            __context__['retcode'] = 2
            return False
        if kwargs.get("debug", False):
            logger.info(validated)

    # Check #5 compute vrouter namespaces duplicates check
    out = __salt__['saltutil.cmd']( tgt=compute_target,
                                    tgt_type=compute_target_type,
                                    fun='health_checks.list_namespaces',
                                    timeout=5
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    validated = []
    all_namespaces = []
    for minion in out:
        namespaces = out[minion]['ret']
        for ns in namespaces:
            if ns['uuid'] not in all_namespaces:
                all_namespaces.append(ns['uuid'])
            else:
                if minion not in failed_minions:
                    failed_minions.append(minion)
                logger.error({'minion': minion, 'uuid': ns['uuid']})
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        logger.error("Duplicated SNAT vrouters found. Please reset their gateways")
        __context__['retcode'] = 2
        return False
    if kwargs.get("debug", False):
        logger.info(validated)

    # TODO: peers check
    return True


def galera_check(cluster_size=3, target='I@galera:master or I@galera:slave', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify galera cluster size and state '''

    agent = "Galera status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='mysql.status',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []

    validated = []
    for minion in out:
        if int(out[minion]['ret']['wsrep_cluster_size']) != int(cluster_size) and minion not in failed_minions:
            failed_minions.append(minion)
        if out[minion]['ret']['wsrep_evs_state'] != 'OPERATIONAL' and minion not in failed_minions:
            failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(validated)
        logger.info("Cluster size: " + str(out[validated[0]]['ret']['wsrep_cluster_size']))
        logger.info("Cluster state: " + str(out[validated[0]]['ret']['wsrep_evs_state']))
    return True


def _quote_str(s, l=False, r=False):

    ''' Quoting rabbitmq erl objects for json import '''

    if len(s) > 0:
        if l:
            s = s.lstrip()
        if r:
            s = s.rstrip()
        if (s[0] == "'") and (s[-1] != "'") and r and not l:
            s += "'"
        if (s[0] == '"') and (s[-1] != '"') and r and not l:
            s += '"'
        if (s[-1] == "'") and (s[0] != "'") and l and not r:
            s = "'" + s
        if (s[-1] == '"') and (s[0] != '"') and l and not r:
            s = '"' + s
        if (s[-1] != "'") and (s[-1] != '"') and (s[0] != "'") and (s[0] != '"'):
            s = '"' + s.replace('"', '\\\"') + '"'
        else:
            if (not l) and (not r) and s[0] != '"' and not s[-1] != '"':
                s = s.replace('"', '\\\"')
        return s.replace("'", '"')
    else:
        return s


def _sanitize_rmqctl_output(string):

    ''' Sanitizing rabbitmq erl objects for json import '''

    rabbitctl_json = ""
    for line in string.split(','):
        copy = line
        left = ""
        right = ""
        mid = copy
        lpar = False
        rpar = False
        if re.search('([\[\{\s]+)(.*)', copy):
            mid = re.sub('^([\[\{\s]+)','', copy)
            left = copy[:-len(mid)]
            copy = mid
            lpar = True
        if re.search('(.*)([\]\}\s]+)$', copy):
            mid = re.sub('([\]\}\s]+)$','', copy)
            right = copy[len(mid):]
            copy = mid
            rpar = True
        result = left + _quote_str(mid, l=lpar, r=rpar) + right
        if (not rpar) and lpar and (len(left.strip()) > 0) and (left.strip()[-1] == '{'):
            result += ":"
        else:
            result += ","
        rabbitctl_json += result

    rabbitctl_json = rabbitctl_json[:-1]
    new_rabbitctl_json = rabbitctl_json
    for s in re.findall('"[^:\[{\]}]+"\s*:\s*("[^\[{\]}]+")', rabbitctl_json):
        if '"' in s[1:][:-1]:
            orig = s
            changed = '"' + s.replace('\\', '\\\\').replace('"', '\\\"') + '"'
            new_rabbitctl_json = new_rabbitctl_json.replace(orig, changed)
    return new_rabbitctl_json

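# _quote_str() and _sanitize_rmqctl_output() together rewrite the Erlang-term
# style output of rabbitmqctl (nested tuples/lists with unquoted atoms) into a
# string that json.loads() can parse; rabbitmq_cmd() below relies on this to
# return e.g. the cluster_status output as plain JSON.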


def rabbitmq_list_queues(vhost='/'):

    ''' JSON formatted RabbitMQ queues list '''

    proc = subprocess.Popen(['rabbitmqctl', 'list_queues', '-p', vhost], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    queues = {}
    for line in stdout.split('\n'):
        if re.findall('[0-9]$', line):
            queue_name, num = re.sub(r"\s+", " ", line).split()
            queues[queue_name] = int(num)

    return queues


def rabbitmq_list_vhosts():

    ''' JSON formatted RabbitMQ vhosts list '''

    proc = subprocess.Popen(['rabbitmqctl', 'list_vhosts'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    vhosts = []
    for line in stdout.split('\n'):
        if re.findall('^/', line):
            vhosts.append(line)

    return vhosts


def rabbitmq_cmd(cmd):

    ''' JSON formatted RabbitMQ command output '''

    supported_commands = ['status', 'cluster_status', 'list_hashes', 'list_ciphers']
    if cmd not in supported_commands:
        logger.error("Command is not supported yet, sorry")
        logger.error("Supported commands are: " + str(supported_commands))
        __context__['retcode'] = 2
        return False

    proc = subprocess.Popen(['rabbitmqctl', cmd], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    rabbitmqctl_cutoff = stdout[int(stdout.find('[')):int(stdout.rfind(']'))+1].replace('\n','')
    return json.loads(_sanitize_rmqctl_output(rabbitmqctl_cutoff))


def rabbitmq_check(target='I@rabbitmq:server', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify rabbit cluster and its alarms '''

    agent = "RabbitMQ status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.rabbitmq_cmd',
                                    arg=['cluster_status'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []

    for minion in out:
        rabbitmqctl_json = out[minion]['ret']
        running_nodes = []
        available_nodes = []
        alarms = []
        for el in rabbitmqctl_json:
            if 'alarms' in el:
                alarms = el['alarms']
            if 'nodes' in el:
                available_nodes = el['nodes'][0]['disc']
            if 'running_nodes' in el:
                running_nodes = el['running_nodes']

        if sorted(running_nodes) == sorted(available_nodes):
            nodes_alarms = []
            for node in running_nodes:
                for el in alarms:
                    if node in el:
                        if len(el[node]) > 0:
                            nodes_alarms.append(el[node])
            if len(nodes_alarms) > 0:
                failed_minions.append(minion)
        else:
            failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(running_nodes)
    return True


def haproxy_status(socket_path='/run/haproxy/admin.sock', buff_size=8192, encoding='UTF-8', stats_filter=[]):

    ''' JSON formatted haproxy status '''

    if not os.path.exists(socket_path):
        logger.error('Socket %s does not exist or haproxy not running' % socket_path)
        __context__['retcode'] = 2
        return False

    client = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(socket_path)
    stat_cmd = 'show stat\n'

    client.send(bytearray(stat_cmd, encoding))
    output = client.recv(buff_size)

    res = ""
    while output:
        res += output.decode(encoding)
        output = client.recv(buff_size)
    client.close()

    haproxy_stats = {}
    res_list = res.split('\n')
    fields = res_list[0][2:].split(',')
    stats_list = []
    for line in res_list[1:]:
        if len(line.strip()) > 0:
            stats_list.append(line)

    for i in range(len(stats_list)):
        element = {}
        for n in fields:
            element[n] = stats_list[i].split(',')[fields.index(n)]
        server_name = element.pop('pxname')
        server_type = element.pop('svname')
        if stats_filter:
            filtered_element = element.copy()
            for el in element:
                if el not in stats_filter:
                    filtered_element.pop(el)
            element = filtered_element
        if server_name not in haproxy_stats:
            haproxy_stats[server_name] = {}
        if server_type == "FRONTEND" or server_type == "BACKEND":
            haproxy_stats[server_name][server_type] = element
        else:
            if 'UPSTREAM' not in haproxy_stats[server_name]:
                haproxy_stats[server_name]['UPSTREAM'] = {}
            haproxy_stats[server_name]['UPSTREAM'][server_type] = element

    return haproxy_stats

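# haproxy_status() returns a dict keyed by proxy name (pxname): the FRONTEND
# and BACKEND rows are stored directly, while every other svname row is
# grouped under 'UPSTREAM'. The stats_filter argument (e.g. ['status'])
# limits the fields kept per row, which is how haproxy_check() uses it below.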


def haproxy_check(target='I@haproxy:proxy', target_type='compound', ignore_dead=False, ignore_services=[], ignore_upstreams=[], ignore_no_upstream=False, **kwargs):

    ''' Verify haproxy backends status '''

    agent = "haproxy status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.haproxy_status',
                                    arg=["stats_filter=['status']"],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        haproxy_json = out[minion]['ret']
        for service in haproxy_json:
            if service not in ignore_services:
                if haproxy_json[service]['FRONTEND']['status'] != 'OPEN':
                    if minion not in failed_minions:
                        failed_minions.append(minion)
                if haproxy_json[service]['BACKEND']['status'] != 'UP':
                    if minion not in failed_minions:
                        failed_minions.append(minion)
                if 'UPSTREAM' in haproxy_json[service]:
                    for upstream in haproxy_json[service]['UPSTREAM']:
                        if upstream not in ignore_upstreams:
                            if haproxy_json[service]['UPSTREAM'][upstream]['status'] != 'UP':
                                if minion not in failed_minions:
                                    failed_minions.append(minion)
                else:
                    if not ignore_no_upstream:
                        if minion not in failed_minions:
                            failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def df_check(target='*', target_type='glob', verify='space', space_limit=80, inode_limit=80, ignore_dead=False, ignore_partitions=[], **kwargs):

    ''' Verify storage space/inodes status '''

    supported_options = ['space', 'inodes']
    if verify not in supported_options:
        logger.error('Unsupported "verify" option.')
        logger.error('Supported options are: %s' % str(supported_options))
        __context__['retcode'] = 2
        return False

    if verify == 'space':
        fun_cmd = 'disk.usage'
        json_arg = 'capacity'
        limit = space_limit
    elif verify == 'inodes':
        fun_cmd = 'disk.inodeusage'
        json_arg = 'use'
        limit = inode_limit

    agent = "df status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun=fun_cmd,
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        df_json = out[minion]['ret']
        for disk in df_json:
            if disk not in ignore_partitions:
                if int(df_json[disk][json_arg][:-1]) > int(limit):
                    if minion not in failed_minions:
                        failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def load_check(target='*', target_type='glob', la1=3, la5=3, la15=3, ignore_dead=False, **kwargs):

    ''' Verify load average status '''

    agent = "load average status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.loadavg',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        la_json = out[minion]['ret']
        if float(la_json['1-min']) > float(la1):
            if minion not in failed_minions:
                failed_minions.append(minion)
        if float(la_json['5-min']) > float(la5):
            if minion not in failed_minions:
                failed_minions.append(minion)
        if float(la_json['15-min']) > float(la15):
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def netdev_check(target='*', target_type='glob', rx_drop_limit=0, tx_drop_limit=0, ignore_devices=[], ignore_dead=False, **kwargs):

    ''' Verify netdev rx/tx drop status '''

    agent = "netdev rx/tx status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.netdev',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = {}
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        dev_json = out[minion]['ret']
        for netdev in dev_json:
            if netdev not in ignore_devices:
                if int(dev_json[netdev]['rx_drop']) > int(rx_drop_limit):
                    if minion not in failed_minions:
                        failed_minions[minion] = {}
                    if netdev not in failed_minions[minion]:
                        failed_minions[minion][netdev] = {}
                    failed_minions[minion][netdev]['rx_drop'] = int(dev_json[netdev]['rx_drop'])
                if int(dev_json[netdev]['tx_drop']) > int(tx_drop_limit):
                    if minion not in failed_minions:
                        failed_minions[minion] = {}
                    if netdev not in failed_minions[minion]:
                        failed_minions[minion][netdev] = {}
                    failed_minions[minion][netdev]['tx_drop'] = int(dev_json[netdev]['tx_drop'])

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def mem_check(target='*', target_type='glob', used_limit=80, ignore_dead=False, **kwargs):

    ''' Verify available memory status '''

    agent = "available memory status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.meminfo',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        mem_avail = int(out[minion]['ret']['MemAvailable']['value'])
        mem_total = int(out[minion]['ret']['MemTotal']['value'])
        used_pct = float((mem_total - mem_avail) * 100 / mem_total)
        if used_pct > float(used_limit):
            if minion not in failed_minions:
                failed_minions.append(minion)
        else:
            verified_minions.append( { minion : str(used_pct) + '%' } )

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def ntp_status(params=['-4', '-p', '-n']):

    ''' JSON formatted ntpq command output '''

    ntp_states = [
        { 'indicator': '#', 'comment': 'source selected, distance exceeds maximum value' },
        { 'indicator': 'o', 'comment': 'source selected, Pulse Per Second (PPS) used' },
        { 'indicator': '+', 'comment': 'source selected, included in final set' },
        { 'indicator': 'x', 'comment': 'source false ticker' },
        { 'indicator': '.', 'comment': 'source selected from end of candidate list' },
        { 'indicator': '-', 'comment': 'source discarded by cluster algorithm' },
        { 'indicator': '*', 'comment': 'current time source' },
        { 'indicator': ' ', 'comment': 'source discarded high stratum, failed sanity' }
    ]
    ntp_state_indicators = []
    for state in ntp_states:
        ntp_state_indicators.append(state['indicator'])
    source_types = {}
    source_types['l'] = "local (such as a GPS, WWVB)"
    source_types['u'] = "unicast (most common)"
    source_types['m'] = "multicast"
    source_types['b'] = "broadcast"
    source_types['-'] = "netaddr"

    proc = subprocess.Popen(['ntpq'] + params, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    ntp_lines = stdout.split('\n')
    fields = re.sub("\s+", " ", ntp_lines[0]).split()
    fields[fields.index('st')] = 'stratum'
    fields[fields.index('t')] = 'source_type'

    ntp_peers = {}
    for line in ntp_lines[2:]:
        if len(line.strip()) > 0:
            element = {}
            values = re.sub("\s+", " ", line).split()
            for i in range(len(values)):
                if fields[i] == 'source_type':
                    element[fields[i]] = { 'indicator': values[i], 'comment': source_types[values[i]] }
                elif fields[i] in ['stratum', 'when', 'poll', 'reach']:
                    if values[i] == '-':
                        element[fields[i]] = int(-1)
                    else:
                        element[fields[i]] = int(values[i])
                elif fields[i] in ['delay', 'offset', 'jitter']:
                    element[fields[i]] = float(values[i])
                else:
                    element[fields[i]] = values[i]
            peer = element.pop('remote')
            peer_state = peer[0]
            if peer_state in ntp_state_indicators:
                peer = peer[1:]
            else:
                peer_state = 'f'
            element['current'] = False
            if peer_state == '*':
                element['current'] = True
            for state in ntp_states:
                if state['indicator'] == peer_state:
                    element['state'] = state.copy()
                if peer_state == 'f' and state['indicator'] == ' ':
                    fail_state = state.copy()
                    fail_state.pop('indicator')
                    fail_state['indicator'] = 'f'
                    element['state'] = fail_state
            ntp_peers[peer] = element

    return ntp_peers

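# ntp_status() returns a dict keyed by peer address (with the leading state
# indicator stripped), where each entry holds the parsed ntpq columns
# ('stratum', 'delay', 'offset', 'jitter', ...), a 'state' description taken
# from ntp_states and a 'current' flag for the peer marked with '*'.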


def ntp_check(min_peers=1, max_stratum=3, target='*', target_type='glob', ignore_dead=False, **kwargs):

    ''' Verify NTP peers status '''

    agent = "ntpd peers status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.ntp_status',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        ntp_json = out[minion]['ret']
        good_peers = []
        for peer in ntp_json:
            if ntp_json[peer]['stratum'] < int(max_stratum) + 1:
                good_peers.append(peer)
        if len(good_peers) > int(min_peers) - 1:
            if minion not in verified_minions:
                verified_minions.append(minion)
        else:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True


def gluster_pool_list():

    ''' JSON formatted GlusterFS pool list command output '''

    proc = subprocess.Popen(['gluster', 'pool', 'list'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    regex = re.compile('^(\S+)\s+(\S+)\s+(\S+)$')
    fields = regex.findall(stdout.split('\n')[0])[0]

    pool = {}

    for line in stdout.split('\n')[1:]:
        if len(line.strip()) > 0:
            peer = {}
            values = regex.findall(line.strip())[0]
            for i in range(len(fields)):
                peer[fields[i].lower()] = values[i]
            uuid = peer.pop('uuid')
            pool[uuid] = peer

    return pool


def gluster_volume_status():

    ''' JSON formatted GlusterFS volumes status command output '''

    proc = subprocess.Popen(['gluster', 'volume', 'status', 'all', 'detail'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    begin_volume = False
    brick_lookup = False
    volumes = {}
    volume_name = ""

    for line in stdout.split('\n'):
        if 'Status of volume' in line:
            volume_name = line.split(':')[1].strip()
            volumes[volume_name] = { 'bricks': [] }
            begin_volume = True
        elif len(line.strip()) == 0:
            if begin_volume:
                begin_volume = False
        elif '--------' in line:
            brick_lookup = True
        elif brick_lookup and line.split(':')[0].strip() == 'Brick':
            brick_host, brick_path = re.findall('^Brick\ *:\ (.*)', line)[0].split()[1].split(':')
            volumes[volume_name]['bricks'].append({ 'host': brick_host, 'path': brick_path })
            brick_lookup = False
        else:
            brick_key, brick_value = line.split(':')
            brick_key = brick_key.strip().lower().replace(' ', '_')
            brick_value = brick_value.strip()
            volumes[volume_name]['bricks'][len(volumes[volume_name]['bricks']) - 1][brick_key] = brick_value

    return volumes


def gluster_pool_check(target='I@glusterfs:server', target_type='compound', expected_size=3, ignore_dead=False, **kwargs):

    ''' Check GlusterFS peer status '''

    agent = "glusterfs peer status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.gluster_pool_list',
                                    timeout=3,
                                    kwargs='[batch=True]'
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        gluster_json = out[minion]['ret']
        alive_peers = []
        for peer in gluster_json:
            if gluster_json[peer]['state'] == 'Connected':
                alive_peers.append(peer)
            else:
                if minion not in failed_minions:
                    failed_minions.append(minion)
        if len(alive_peers) < expected_size:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True
|  | 1337 |  | 
|  | 1338 |  | 
|  | 1339 | def gluster_volumes_check(target='I@glusterfs:server', target_type='compound', expected_size=3, ignore_volumes=[], ignore_dead=False, **kwargs): | 
|  | 1340 |  | 
|  | 1341 | ''' Check GlusterFS volumes status ''' | 
|  | 1342 |  | 
|  | 1343 | agent = "glusterfs volumes status" | 
|  | 1344 | out = __salt__['saltutil.cmd']( tgt=target, | 
|  | 1345 | tgt_type=target_type, | 
|  | 1346 | fun='health_checks.gluster_volume_status', | 
|  | 1347 | timeout=3, | 
|  | 1348 | kwargs='[batch=True]' | 
|  | 1349 | ) or None | 
|  | 1350 |  | 
|  | 1351 | if not _minions_output(out, agent, ignore_dead): | 
|  | 1352 | __context__['retcode'] = 2 | 
|  | 1353 | return False | 
|  | 1354 |  | 
|  | 1355 | failed_minions = [] | 
|  | 1356 | verified_minions = [] | 
|  | 1357 | verified_volumes = [] | 
|  | 1358 | for minion in out: | 
|  | 1359 | verified_minions.append(minion) | 
|  | 1360 | gluster_json = out[minion]['ret'] | 
|  | 1361 | for volume in gluster_json: | 
|  | 1362 | if volume in ignore_volumes: | 
|  | 1363 | continue | 
|  | 1364 | else: | 
|  | 1365 | verified_volumes.append(volume) | 
|  | 1366 | alive_bricks = 0 | 
|  | 1367 | if 'bricks' not in gluster_json[volume]: | 
|  | 1368 | if minion not in failed_minions: | 
|  | 1369 | failed_minions.append(minion) | 
|  | 1370 | bricks = gluster_json[volume].get('bricks', []) | 
|  | 1371 | if len(bricks) < expected_size: | 
|  | 1372 | if minion not in failed_minions: | 
|  | 1373 | failed_minions.append(minion) | 
|  | 1374 | for brick in bricks: | 
|  | 1375 | if brick['online'] == 'Y': | 
|  | 1376 | alive_bricks += 1 | 
|  | 1377 | else: | 
|  | 1378 | if minion not in failed_minions: | 
|  | 1379 | failed_minions.append(minion) | 
|  | 1380 | if alive_bricks < expected_size: | 
|  | 1381 | if minion not in failed_minions: | 
|  | 1382 | failed_minions.append(minion) | 
|  | 1383 |  | 
|  | 1384 | if not _failed_minions(out, agent, failed_minions): | 
|  | 1385 | __context__['retcode'] = 2 | 
|  | 1386 | return False | 
|  | 1387 |  | 
|  | 1388 | if kwargs.get("debug", False): | 
|  | 1389 | logger.info("Verified minions:") | 
|  | 1390 | logger.info(verified_minions) | 
|  | 1391 | logger.info("Verified volumes:") | 
|  | 1392 | logger.info(verified_volumes) | 
|  | 1393 |  | 
|  | 1394 | return True | 
|  | 1395 |  | 
|  | 1396 |  | 
|  | 1397 | def ceph_cmd(cmd): | 
|  | 1398 |  | 
|  | 1399 | ''' JSON formatted ceph command output ''' | 
|  | 1400 |  | 
|  | 1401 | proc = subprocess.Popen(['ceph'] + cmd.split() + ['--format', 'json-pretty'], stdout=subprocess.PIPE) | 
|  | 1402 | stdout, stderr = proc.communicate() | 
|  | 1403 |  | 
|  | 1404 | return json.loads(stdout) | 
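|  |  |  | 
|  |  | # A minimal sketch of what the wrapper does: ceph_cmd('osd tree') shells out to | 
|  |  | # `ceph osd tree --format json-pretty` on the local node and returns the parsed dict; | 
|  |  | # ceph_health_check below fans this function out to the monitors with arg=['status']. | 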
|  | 1405 |  | 
|  | 1406 |  | 
|  | 1407 | def ceph_health_check(target='I@ceph:mon', target_type='compound', expected_status='HEALTH_OK', expected_state='active+clean', ignore_dead=False, **kwargs): | 
|  | 1408 |  | 
|  | 1409 | ''' Check all ceph monitors health status ''' | 
|  | 1410 |  | 
|  | 1411 | agent = "ceph health status" | 
|  | 1412 | out = __salt__['saltutil.cmd']( tgt=target, | 
|  | 1413 | tgt_type=target_type, | 
|  | 1414 | fun='health_checks.ceph_cmd', | 
|  | 1415 | arg=['status'], | 
|  | 1416 | timeout=3 | 
|  | 1417 | ) or None | 
|  | 1418 |  | 
|  | 1419 | if not _minions_output(out, agent, ignore_dead): | 
|  | 1420 | __context__['retcode'] = 2 | 
|  | 1421 | return False | 
|  | 1422 |  | 
|  | 1423 | failed_minions = [] | 
|  | 1424 | verified_minions = [] | 
|  | 1425 | for minion in out: | 
|  | 1426 | verified_minions.append(minion) | 
|  | 1427 | ceph_json = out[minion]['ret'] | 
|  | 1428 | fsid = ceph_json['fsid'] | 
|  | 1429 |  | 
|  | 1430 | if ceph_json['health']['overall_status'] != expected_status: | 
|  | 1431 | if minion not in failed_minions: | 
|  | 1432 | failed_minions.append(minion) | 
|  | 1433 |  | 
|  | 1434 | if ceph_json['osdmap']['osdmap']['full']: | 
|  | 1435 | if minion not in failed_minions: | 
|  | 1436 | failed_minions.append(minion) | 
|  | 1437 |  | 
|  | 1438 | if ceph_json['osdmap']['osdmap']['nearfull']: | 
|  | 1439 | if minion not in failed_minions: | 
|  | 1440 | failed_minions.append(minion) | 
|  | 1441 |  | 
|  | 1442 | num_osds = ceph_json['osdmap']['osdmap']['num_osds'] | 
|  | 1443 | num_in_osds = ceph_json['osdmap']['osdmap']['num_in_osds'] | 
|  | 1444 | num_up_osds = ceph_json['osdmap']['osdmap']['num_up_osds'] | 
|  | 1445 | if not ( num_osds == num_in_osds == num_up_osds ): | 
|  | 1446 | if minion not in failed_minions: | 
|  | 1447 | failed_minions.append(minion) | 
|  | 1448 |  | 
|  | 1449 | quorum = len(ceph_json['quorum']) | 
|  | 1450 | quorum_names = len(ceph_json['quorum_names']) | 
|  | 1451 | mons = len(ceph_json['monmap']['mons']) | 
|  | 1452 | if not ( quorum == quorum_names == mons ): | 
|  | 1453 | if minion not in failed_minions: | 
|  | 1454 | failed_minions.append(minion) | 
|  | 1455 |  | 
|  | 1456 | for mon in ceph_json['health']['timechecks']['mons']: | 
|  | 1457 | if mon['health'] != expected_status: | 
|  | 1458 | if minion not in failed_minions: | 
|  | 1459 | failed_minions.append(minion) | 
|  | 1460 |  | 
|  | 1461 | for srv in ceph_json['health']['health']['health_services']: | 
|  | 1462 | for mon in srv['mons']: | 
|  | 1463 | if mon['health'] != expected_status: | 
|  | 1464 | if minion not in failed_minions: | 
|  | 1465 | failed_minions.append(minion) | 
|  | 1466 |  | 
|  | 1467 | for state in ceph_json['pgmap']['pgs_by_state']: | 
|  | 1468 | if state['state_name'] != expected_state: | 
|  | 1469 | if minion not in failed_minions: | 
|  | 1470 | failed_minions.append(minion) | 
|  | 1471 |  | 
|  | 1472 | if not _failed_minions(out, agent, failed_minions): | 
|  | 1473 | __context__['retcode'] = 2 | 
|  | 1474 | return False | 
|  | 1475 |  | 
|  | 1476 | if kwargs.get("debug", False): | 
|  | 1477 | logger.info("Quorum:") | 
|  | 1478 | logger.info(ceph_json['quorum_names']) | 
|  | 1479 | logger.info("Verified minions:") | 
|  | 1480 | logger.info(verified_minions) | 
|  | 1481 |  | 
|  | 1482 | return True | 
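|  |  |  | 
|  |  | # Usage sketch (the target glob is an assumption): | 
|  |  | #   salt 'cfg01*' health_checks.ceph_health_check expected_status=HEALTH_OK debug=True | 
|  |  | # A minion is reported as failed if its monitor shows a different overall status, a full or | 
|  |  | # nearfull osdmap, missing/down OSDs, broken quorum, or placement groups outside expected_state. | 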
|  | 1483 |  | 
|  | 1484 |  | 
| Dzmitry Stremkouski | 7cd10fc | 2019-04-17 11:51:59 +0200 | [diff] [blame] | 1485 | def get_entropy(): | 
|  | 1486 |  | 
|  | 1487 | ''' Retrieve entropy size for the host ''' | 
|  | 1488 |  | 
|  | 1489 | with open('/proc/sys/kernel/random/entropy_avail', 'r') as f: | 
|  | 1490 | entropy = f.read() | 
|  | 1491 | return entropy | 
|  | 1492 |  | 
|  | 1493 |  | 
|  | 1494 | def entropy_check(target='*', target_type='glob', minimum_bits=700, ignore_dead=False, **kwargs): | 
|  | 1495 |  | 
|  | 1496 | ''' Check entropy size in cluster ''' | 
|  | 1497 |  | 
|  | 1498 | agent = "entropy size status" | 
|  | 1499 | out = __salt__['saltutil.cmd']( tgt=target, | 
|  | 1500 | tgt_type=target_type, | 
|  | 1501 | fun='health_checks.get_entropy', | 
|  | 1502 | timeout=3 | 
|  | 1503 | ) or None | 
|  | 1504 |  | 
|  | 1505 | if not _minions_output(out, agent, ignore_dead): | 
|  | 1506 | __context__['retcode'] = 2 | 
|  | 1507 | return False | 
|  | 1508 |  | 
|  | 1509 | failed_minions = [] | 
|  | 1510 | verified_minions = [] | 
|  | 1511 |  | 
| Dzmitry Stremkouski | 7cd10fc | 2019-04-17 11:51:59 +0200 | [diff] [blame] | 1512 | for minion in out: | 
|  | 1513 | verified_minions.append(minion) | 
|  | 1514 | entropy = int(out[minion]['ret']) | 
|  | 1515 | if entropy < minimum_bits: | 
|  | 1516 | if minion not in failed_minions: | 
|  | 1517 | failed_minions.append(minion) | 
|  | 1518 |  | 
|  | 1519 | if not _failed_minions(out, agent, failed_minions): | 
|  | 1520 | __context__['retcode'] = 2 | 
|  | 1521 | return False | 
|  | 1522 |  | 
|  | 1523 | if kwargs.get("debug", False): | 
|  | 1524 | logger.info(verified_minions) | 
|  | 1525 |  | 
|  | 1526 | return True | 
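|  |  |  | 
|  |  | # Usage sketch (the target glob is an assumption): | 
|  |  | #   salt 'cfg01*' health_checks.entropy_check minimum_bits=1000 debug=True | 
|  |  | # Each minion reports /proc/sys/kernel/random/entropy_avail via get_entropy above; | 
|  |  | # minions below minimum_bits are reported as failed. | 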
|  | 1527 |  | 
|  | 1528 |  | 
| Dzmitry Stremkouski | f1bcbb5 | 2019-04-11 15:48:24 +0200 | [diff] [blame] | 1529 | def docker_registry_list(host): | 
|  | 1530 |  | 
|  | 1531 | ''' Retrieve and list docker catalog ''' | 
|  | 1532 |  | 
|  | 1533 | try: | 
|  | 1534 | if host[0:4] == 'http': | 
|  | 1535 | url = host + '/v2/' | 
|  | 1536 | else: | 
|  | 1537 | url = 'http://' + host + '/v2/' | 
|  | 1538 | repos = requests.get(url + '_catalog') | 
|  | 1539 |  | 
|  | 1540 | versions = {} | 
|  | 1541 | for repo in repos.json()['repositories']: | 
|  | 1542 | repo_versions = requests.get(url + repo + '/tags/list') | 
|  | 1543 | versions[repo] = repo_versions.json().pop('tags') | 
|  | 1544 | return versions | 
|  | 1545 | except: | 
|  | 1546 | return {} | 
| Dzmitry Stremkouski | 7cd10fc | 2019-04-17 11:51:59 +0200 | [diff] [blame] | 1547 |  | 
|  | 1548 |  | 
|  | 1549 | def docker_ps(list_all=0): | 
|  | 1550 |  | 
|  |  | ''' List docker containers via the docker-py client socket ''' | 
|  |  |  | 
|  | 1551 | import docker | 
|  | 1552 | client = docker.client.Client(base_url='unix://var/run/docker.sock') | 
|  | 1553 | return client.containers(all=list_all) | 
|  | 1554 |  | 
| Dzmitry Stremkouski | 2c709f2 | 2019-04-22 02:27:54 +0200 | [diff] [blame] | 1555 |  | 
|  | 1556 | def zookeeper_cmd(cmd, hostname='localhost', port=2181): | 
|  | 1557 |  | 
|  | 1558 | ''' Execute zookeeper cmd via socket ''' | 
|  | 1559 |  | 
|  | 1560 | buf_size = 1024 | 
|  | 1561 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | 
|  | 1562 | sock.connect((hostname, port)) | 
|  | 1563 | sock.sendall(cmd) | 
|  | 1564 | sock.shutdown(socket.SHUT_WR) | 
|  | 1565 | rdata = "" | 
|  | 1566 | while 1: | 
|  | 1567 | data = sock.recv(buf_size) | 
|  | 1568 | if data == "": | 
|  | 1569 | break | 
|  | 1570 | rdata += data | 
|  | 1571 | sock.close() | 
|  | 1572 | return rdata | 
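|  |  |  | 
|  |  | # Illustrative use of the raw socket wrapper with ZooKeeper four-letter commands: | 
|  |  | #   zookeeper_cmd('ruok')                          # returns 'imok' from a healthy server | 
|  |  | #   zookeeper_cmd('stat', hostname='10.0.0.10')    # assumed address, not from this module | 
|  |  | # zookeeper_stats below builds on the 'conf', 'envi', 'srvr' and 'cons' commands. | 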
|  | 1573 |  | 
|  | 1574 |  | 
|  | 1575 | def zookeeper_stats(): | 
|  | 1576 |  | 
|  | 1577 | ''' Retrieve zookeeper stats ''' | 
|  | 1578 |  | 
|  | 1579 | stats = {} | 
|  | 1580 | stats['configuration'] = {} | 
|  | 1581 | for line in zookeeper_cmd('conf').split('\n'): | 
|  | 1582 | if line: | 
|  | 1583 | key, value = line.split('=') | 
|  | 1584 | if value.strip().isdigit(): | 
|  | 1585 | value = int(value) | 
|  | 1586 | else: | 
|  | 1587 | value = value.strip() | 
|  | 1588 | stats['configuration'][key.strip().lower().replace(' ', '_')] = value | 
|  | 1589 |  | 
|  | 1590 | stats['environment'] = {} | 
|  | 1591 | for line in zookeeper_cmd('envi').split('\n')[1:]: | 
|  | 1592 | if line: | 
|  | 1593 | key, value = line.split('=') | 
|  | 1594 | if value.strip().isdigit(): | 
|  | 1595 | value = int(value) | 
|  | 1596 | else: | 
|  | 1597 | value = value.strip() | 
|  | 1598 | stats['environment'][key.strip().lower().replace(' ', '_')] = value | 
|  | 1599 |  | 
|  | 1600 | stats['server'] = {} | 
|  | 1601 | for line in zookeeper_cmd('srvr').split('\n'): | 
|  | 1602 | if line: | 
|  | 1603 | if re.findall('^Zookeeper version:', line, flags=re.IGNORECASE): | 
|  | 1604 | version_str = line.split(':')[1].strip() | 
|  | 1605 | version = version_str | 
|  | 1606 | if '-' in version_str: | 
|  | 1607 | version_str = version_str.split('-')[0] | 
|  | 1608 | if '.' in version_str: | 
|  | 1609 | version = [] | 
|  | 1610 | version_list = version_str.split('.') | 
|  | 1611 | for elem in version_list: | 
|  | 1612 | if elem.strip().isdigit(): | 
|  | 1613 | version.append(int(elem)) | 
|  | 1614 | stats['server']['version'] = version | 
|  | 1615 | continue | 
|  | 1616 | if re.findall('^Latency min/avg/max:', line, flags=re.IGNORECASE): | 
|  | 1617 | latency_min, latency_avg, latency_max = line.split(':')[1].strip().split('/') | 
|  | 1618 | stats['server']['latency'] = {'min':int(latency_min),'max':int(latency_max),'avg':int(latency_avg)} | 
|  | 1619 | continue | 
|  | 1620 | key, value = line.split(':') | 
|  | 1621 | if value.strip().isdigit(): | 
|  | 1622 | value = int(value) | 
|  | 1623 | else: | 
|  | 1624 | value = value.strip() | 
|  | 1625 | stats['server'][key.strip().lower().replace(' ', '_')] = value | 
|  | 1626 |  | 
|  | 1627 | stats['clients'] = {} | 
|  | 1628 | for line in zookeeper_cmd('cons').split('\n'): | 
|  | 1629 | if line: | 
|  | 1630 | clients = re.findall('^(\s*\/)(.+)(:\d+\[\d+\])(\(.+\))$', line)[0][1:] | 
|  | 1631 | addr = clients[0] | 
|  | 1632 | port, direction = re.findall('^(\d+)\[(\d+)\]$', clients[1][1:])[0] | 
|  | 1633 | client = '['+addr+']:'+str(port) | 
|  | 1634 | stats['clients'][client] = {'direction': int(direction)} | 
|  | 1635 | for elem in clients[2][1:-1].split(','): | 
|  | 1636 | key, value = elem.split('=') | 
|  | 1637 | if value.strip().isdigit(): | 
|  | 1638 | value = int(value) | 
|  | 1639 | else: | 
|  | 1640 | value = value.strip() | 
|  | 1641 | stats['clients'][client][key.strip().lower().replace(' ', '_')] = value | 
|  | 1642 |  | 
|  | 1643 | return stats | 
|  | 1644 |  | 
|  | 1645 |  | 
|  | 1646 | def get_zookeeper_leader(target='I@opencontrail:control', target_type='compound', ignore_dead=False, **kwargs): | 
|  | 1647 |  | 
|  | 1648 | ''' Retrieve zookeeper leader ''' | 
|  | 1649 |  | 
|  | 1650 | agent = "zookeeper leader retrieve" | 
|  | 1651 | out = __salt__['saltutil.cmd']( tgt=target, | 
|  | 1652 | tgt_type=target_type, | 
|  | 1653 | fun='health_checks.zookeeper_stats', | 
|  | 1654 | timeout=3 | 
|  | 1655 | ) or None | 
|  | 1656 |  | 
|  | 1657 | if not _minions_output(out, agent, ignore_dead): | 
|  | 1658 | __context__['retcode'] = 2 | 
|  | 1659 | return False | 
|  | 1660 |  | 
|  | 1661 | leader = None | 
|  | 1662 | for minion in out: | 
|  | 1663 | zookeeper_mode = out[minion]['ret']['server']['mode'] | 
|  | 1664 |  | 
|  | 1665 | if zookeeper_mode == 'leader': | 
|  | 1666 | leader = minion | 
|  | 1667 |  | 
|  | 1668 | return leader | 
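|  |  |  | 
|  |  | # Usage sketch (illustrative invocation; the minion name is an assumption): | 
|  |  | #   salt 'cfg01*' health_checks.get_zookeeper_leader target='I@opencontrail:control' | 
|  |  | # Returns the minion id whose zookeeper 'srvr' output reports mode: leader, or None. | 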
|  | 1669 |  | 
|  | 1670 |  | 
|  | 1671 | def contrail_vrouter_list(api_host='127.0.0.1', api_port=9100): | 
|  | 1672 |  | 
|  | 1673 | ''' Retrieve and list contrail vrouters. | 
|  | 1674 | Valid targets: Contrail controllers. | 
|  | 1675 | ''' | 
|  | 1676 |  | 
|  | 1677 | try: | 
|  | 1678 | if api_host[0:4] == 'http': | 
|  | 1679 | url = api_host + ':' + str(api_port) | 
|  | 1680 | else: | 
|  | 1681 | url = 'http://' + api_host + ':' + str(api_port) | 
|  | 1682 |  | 
|  | 1683 | vrouters = requests.get(url + '/virtual-routers').json() | 
|  | 1684 | vrouter_list = [] | 
|  | 1685 | for vr in vrouters['virtual-routers']: | 
|  | 1686 | vr_uuid = vr['uuid'] | 
|  | 1687 | for name in vr['fq_name']: | 
|  | 1688 | if name == "default-global-system-config": | 
|  | 1689 | continue | 
|  | 1690 | else: | 
|  | 1691 | vr_name = name | 
|  | 1692 | vrouter_list.append({'name': vr_name, 'uuid': vr_uuid}) | 
|  | 1693 | return vrouter_list | 
|  | 1694 |  | 
|  | 1695 | except: | 
|  | 1696 | return {} | 
|  | 1697 |  | 
|  | 1698 |  | 
|  | 1699 | def contrail_vrouter_show(vr_uuid, api_host='127.0.0.1', api_port=9100): | 
|  | 1700 |  | 
|  | 1701 | ''' Retrieve contrail vrouter data | 
|  | 1702 | Valid targets: Contrail controllers. | 
|  | 1703 | ''' | 
|  | 1704 |  | 
|  | 1705 | try: | 
|  | 1706 | if api_host[0:4] == 'http': | 
|  | 1707 | url = api_host + ':' + str(api_port) | 
|  | 1708 | else: | 
|  | 1709 | url = 'http://' + api_host + ':' + str(api_port) | 
|  | 1710 |  | 
|  | 1711 | return requests.get(url + '/virtual-router/' + vr_uuid).json() | 
|  | 1712 |  | 
|  | 1713 | except: | 
|  | 1714 | return {} | 
|  | 1715 |  | 
|  | 1716 |  | 
|  | 1717 | def _xmletree_descend_child(given_child, tag_requested): | 
|  | 1718 |  | 
|  | 1719 | ''' Returns xmletree subelement by tag name ''' | 
|  | 1720 |  | 
|  | 1721 | my_child = {} | 
|  | 1722 |  | 
|  | 1723 | for child in given_child: | 
|  | 1724 | if child.tag == tag_requested: | 
|  | 1725 | my_child = child | 
|  | 1726 | break | 
|  | 1727 |  | 
|  | 1728 | return my_child | 
|  | 1729 |  | 
|  | 1730 |  | 
|  | 1731 | def contrail_vrouter_agent_status(api_host='127.0.0.1', api_port=8085): | 
|  | 1732 |  | 
|  | 1733 | ''' Retrieve contrail vrouter agent status ''' | 
|  | 1734 |  | 
|  | 1735 | import xml.etree.ElementTree as ET | 
|  | 1736 |  | 
|  | 1737 | if api_host[0:4] == 'http': | 
|  | 1738 | url = api_host + ':' + str(api_port) | 
|  | 1739 | else: | 
|  | 1740 | url = 'http://' + api_host + ':' + str(api_port) | 
|  | 1741 |  | 
|  | 1742 | try: | 
|  | 1743 | req = requests.get(url + '/Snh_SandeshUVECacheReq?x=NodeStatus') | 
|  | 1744 | if int(req.status_code) != 200: | 
|  | 1745 | return "Could not fetch data from vrouter agent via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text)) | 
|  | 1746 | except: | 
|  | 1747 | return "Could not fetch data from vrouter agent via %s" % url | 
|  | 1748 |  | 
|  | 1749 | try: | 
|  | 1750 | xmletree = ET.fromstring(req.text) | 
|  | 1751 | except: | 
|  | 1752 | return "Could not parse xml tree %s" % str(req.text) | 
|  | 1753 |  | 
|  | 1754 | try: | 
|  | 1755 | vrouter_data = {} | 
|  | 1756 | child = _xmletree_descend_child(xmletree, 'NodeStatusUVE') | 
|  | 1757 | child = _xmletree_descend_child(child, 'data') | 
|  | 1758 | child = _xmletree_descend_child(child, 'NodeStatus') | 
|  | 1759 | child = _xmletree_descend_child(child, 'process_status') | 
|  | 1760 | child = _xmletree_descend_child(child, 'list') | 
|  | 1761 | child = _xmletree_descend_child(child, 'ProcessStatus') | 
|  | 1762 | vrouter_data['state'] = _xmletree_descend_child(child, 'state').text | 
|  | 1763 | vrouter_data['connections'] = [] | 
|  | 1764 | child = _xmletree_descend_child(child, 'connection_infos') | 
|  | 1765 | for elem in _xmletree_descend_child(child, 'list'): | 
|  | 1766 | conn = {} | 
|  | 1767 | conn['type'] = _xmletree_descend_child(elem,'type').text | 
|  | 1768 | conn['name'] = _xmletree_descend_child(elem,'name').text | 
|  | 1769 | conn['status'] = _xmletree_descend_child(elem,'status').text | 
|  | 1770 | conn['description'] = _xmletree_descend_child(elem,'description').text | 
|  | 1771 | conn['server_addrs'] = [] | 
|  | 1772 | server_addrs = _xmletree_descend_child(elem,'server_addrs') | 
|  | 1773 | for srv in _xmletree_descend_child(server_addrs,'list'): | 
|  | 1774 | host, port = srv.text.split(':') | 
|  | 1775 | conn['server_addrs'].append({'host': host, 'port': port}) | 
|  | 1776 | vrouter_data['connections'].append(conn) | 
|  | 1777 | return vrouter_data | 
|  | 1778 | except: | 
|  | 1779 | return "Unsupported xml tree for this function %s" % str(req.text) | 
|  | 1780 |  | 
|  | 1781 |  | 
| Dzmitry Stremkouski | 3629020 | 2019-05-05 21:26:25 +0200 | [diff] [blame] | 1782 | def contrail_collector_agent_status(vr_name, api_host='auto', api_port=9081): | 
|  | 1783 |  | 
|  | 1784 | ''' Retrieve contrail vrouter agent status from analyticsdb ''' | 
|  | 1785 |  | 
|  | 1786 | if api_host[0:4] == 'http': | 
|  | 1787 | url = api_host + ':' + str(api_port) | 
|  | 1788 | elif api_host == 'auto': | 
|  | 1789 | my_ip = __salt__['pillar.get']('_param:opencontrail_analytics_address') | 
|  | 1790 | url = 'http://' + my_ip + ':' + str(api_port) | 
|  | 1791 | else: | 
|  | 1792 | url = 'http://' + api_host + ':' + str(api_port) | 
|  | 1793 |  | 
|  | 1794 | req = requests.get(url + '/analytics/uves/vrouter/' + vr_name + '?flat') | 
|  | 1795 | if int(req.status_code) != 200: | 
|  | 1796 | return "Could not fetch data from vrouter agent via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text)) | 
|  | 1797 |  | 
|  | 1798 | return json.loads(req.text) | 
|  | 1799 |  | 
|  | 1800 |  | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 1801 | def contrail_control_peers_summary(api_host='auto', api_port=8083): | 
|  | 1802 |  | 
|  | 1803 | ''' Retrieve contrail control peers summary ''' | 
|  | 1804 |  | 
|  | 1805 | import xml.etree.ElementTree as ET | 
|  | 1806 |  | 
|  | 1807 | if api_host[0:4] == 'http': | 
|  | 1808 | url = api_host + ':' + str(api_port) | 
|  | 1809 | elif api_host == 'auto': | 
|  | 1810 | my_ip = '127.0.0.1' | 
|  | 1811 | url = 'http://' + my_ip + ':' + str(api_port) | 
|  | 1812 | else: | 
|  | 1813 | url = 'http://' + api_host + ':' + str(api_port) | 
|  | 1814 |  | 
|  | 1815 | req = requests.get(url + '/Snh_ShowBgpNeighborSummaryReq') | 
|  | 1816 | if int(req.status_code) != 200: | 
|  | 1817 | return "Could not fetch data from contrail control via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text)) | 
|  | 1818 |  | 
|  | 1819 | peers = [] | 
|  | 1820 | summary = req.text | 
|  | 1821 |  | 
|  | 1822 | try: | 
|  | 1823 | xmletree = ET.fromstring(summary) | 
|  | 1824 | for elem in xmletree.find('.//list'): | 
|  | 1825 | attrs = {} | 
|  | 1826 | for child in elem: | 
|  | 1827 | attrs[child.tag] = child.text | 
|  | 1828 | peers.append(attrs) | 
|  | 1829 | except: | 
|  | 1830 | return "Could not parse xml tree %s" % str(summary) | 
|  | 1831 |  | 
|  | 1832 | return peers | 
|  | 1833 |  | 
|  | 1834 |  | 
| Dzmitry Stremkouski | 88275d3 | 2019-07-23 19:42:42 +0200 | [diff] [blame] | 1835 | def contrail_control_peer_status(api_host='auto', api_port=8083, fields=default_peer_filter): | 
|  | 1836 |  | 
|  | 1837 | ''' Contrail control peer status ''' | 
|  | 1838 |  | 
|  | 1839 | peer_status = {} | 
|  | 1840 |  | 
|  | 1841 | for peer_elem in contrail_control_peers_summary(): | 
|  | 1842 | elem = {} | 
|  | 1843 | for attr in peer_elem: | 
|  | 1844 | if attr in fields: | 
|  | 1845 | elem[attr] = peer_elem[attr] | 
|  | 1846 |  | 
|  | 1847 | peer_name = peer_elem["peer"] | 
|  | 1848 | peer_status[peer_name] = elem | 
|  | 1849 |  | 
|  | 1850 | return peer_status | 
|  | 1851 |  | 
|  | 1852 |  | 
| Dzmitry Stremkouski | 3629020 | 2019-05-05 21:26:25 +0200 | [diff] [blame] | 1853 | def _get_object(json_obj, obj_path): | 
|  | 1854 |  | 
|  | 1855 | ''' Retrieve a subelement of a JSON object or a value ''' | 
|  | 1856 |  | 
|  | 1857 | if ':' in obj_path: | 
|  | 1858 | splitter = obj_path.split(':') | 
|  | 1859 | k = splitter[0] | 
|  | 1860 | v = ':'.join(splitter[1:]) | 
|  | 1861 | if k.isdigit(): | 
|  | 1862 | # Return specific element path | 
|  | 1863 | return [ _get_object(json_obj[int(k)], v) ] | 
|  | 1864 | elif k == '*': | 
|  | 1865 | l = [] | 
|  | 1866 | for el in json_obj: | 
|  | 1867 | l.append(_get_object(el, v)) | 
|  | 1868 | # Return all list elements from the path | 
|  | 1869 | return l | 
|  | 1870 | else: | 
|  | 1871 | # Contrail output may have nested JSON | 
|  | 1872 | if isinstance(json_obj, str) or isinstance(json_obj, unicode): | 
|  | 1873 | json_obj = json.loads(json_obj) | 
|  | 1874 | # Descend one level deeper by key | 
|  | 1875 | return { k: _get_object(json_obj[k], v) } | 
|  | 1876 | else: | 
|  | 1877 | return { obj_path: json_obj[obj_path] } | 
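|  |  |  | 
|  |  | # A minimal sketch of the colon-separated path syntax (illustrative data, not Contrail output): | 
|  |  | #   _get_object({'servers': [{'ip': '10.0.0.1', 'port': 22}]}, 'servers:*:ip') | 
|  |  | #   -> {'servers': [{'ip': '10.0.0.1'}]} | 
|  |  | # A numeric path element picks a single list index, while '*' maps over the whole list. | 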
|  | 1878 |  | 
|  | 1879 |  | 
|  | 1880 | def _deepmerge(o1, o2): | 
|  | 1881 |  | 
|  | 1882 | ''' Deep merge JSON objects ''' | 
|  | 1883 |  | 
|  | 1884 | o3 = {} | 
|  | 1885 | if type(o1) == type(o2): | 
|  | 1886 | if type(o1) == dict or type(o1) == tuple: | 
|  | 1887 | for k in set(o1.keys() + o2.keys()): | 
|  | 1888 | if k in o1: | 
|  | 1889 | if k in o2: | 
|  | 1890 | o3[k] = _deepmerge(o1[k], o2[k]) | 
|  | 1891 | else: | 
|  | 1892 | o3[k] = o1[k] | 
|  | 1893 | else: | 
|  | 1894 | o3[k] = o2[k] | 
|  | 1895 | elif type(o1) == list or type(o1) == set: | 
|  | 1896 | o3 = [] + o2 | 
|  | 1897 | for el in o3: | 
|  | 1898 | i = o3.index(el) | 
|  | 1899 | o3[i] = _deepmerge(o1[i], o2[i]) | 
|  | 1900 | else: | 
|  | 1901 | o3 = o2 | 
|  | 1902 | else: | 
|  | 1903 | o3 = o2 | 
|  | 1904 |  | 
|  | 1905 | return o3 | 
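|  |  |  | 
|  |  | # Illustrative merge; on conflicts the right-hand value wins: | 
|  |  | #   _deepmerge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}) | 
|  |  | #   -> {'a': {'x': 1, 'y': 2}, 'b': 3} | 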
|  | 1906 |  | 
|  | 1907 |  | 
|  | 1908 | def contrail_vrouter_agent_info(vr_name, filter_map=default_vrouter_info_map): | 
|  | 1909 |  | 
|  | 1910 | ''' Retrieve filtered contrail vrouter agent info from analyticsdb ''' | 
|  | 1911 |  | 
|  | 1912 | vr_agent_status = contrail_collector_agent_status(vr_name) | 
|  | 1913 | vr_info = {} | 
|  | 1914 | for conf in filter_map: | 
|  | 1915 | vr_info[conf] = {} | 
|  | 1916 | for el_path in filter_map[conf]: | 
|  | 1917 | vr_info = _deepmerge(vr_info, { conf: _get_object(vr_agent_status[conf], el_path) } ) | 
|  | 1918 |  | 
|  | 1919 | return vr_info | 
|  | 1920 |  | 
|  | 1921 |  | 
| Dzmitry Stremkouski | 480c84a | 2019-10-17 19:33:18 +0200 | [diff] [blame] | 1922 | def contrail_mesh_check(target='I@opencontrail:control', target_type='compound', ignore_dead=False, strict=False, **kwargs): | 
|  | 1923 |  | 
|  | 1924 | ''' Check if contrail elements are connected to each other ''' | 
|  | 1925 |  | 
|  | 1926 | agent = "contrail mesh check" | 
|  | 1927 | out = __salt__['saltutil.cmd']( tgt=target, | 
|  | 1928 | tgt_type=target_type, | 
|  | 1929 | fun='health_checks.contrail_control_peer_status', | 
|  | 1930 | timeout=3 | 
|  | 1931 | ) or None | 
|  | 1932 |  | 
|  | 1933 | if not _minions_output(out, agent, ignore_dead): | 
|  | 1934 | __context__['retcode'] = 2 | 
|  | 1935 | return False | 
|  | 1936 |  | 
|  | 1937 |  | 
|  | 1938 | minions = [] | 
|  | 1939 | for node in out: | 
|  | 1940 | if strict: | 
|  | 1941 | minions.append(node) | 
|  | 1942 | else: | 
|  | 1943 | minions.append(node.split('.')[0]) | 
|  | 1944 |  | 
|  | 1945 | elements = {} | 
|  | 1946 | for node in out: | 
|  | 1947 | peer_elem = out[node]["ret"] | 
|  | 1948 | for elem in peer_elem: | 
|  | 1949 | if not strict: | 
|  | 1950 | elem = elem.split('.')[0] | 
|  | 1951 | if elem in elements: | 
|  | 1952 | continue | 
|  | 1953 | elements[elem] = {} | 
|  | 1954 | elements[elem]["peers"] = [] | 
|  | 1955 | elements[elem]["my_address"] = peer_elem[elem]["peer_address"] | 
|  | 1956 | if peer_elem[elem]["encoding"] == "XMPP": | 
|  | 1957 | elements[elem]["type"] = "COMPUTE" | 
|  | 1958 | elif peer_elem[elem]["encoding"] == "BGP": | 
|  | 1959 | if elem in minions: | 
|  | 1960 | elements[elem]["type"] = "CONTROLLER" | 
|  | 1961 | else: | 
|  | 1962 | elements[elem]["type"] = "EDGE-ROUTER" | 
|  | 1963 |  | 
|  | 1964 | for node in out: | 
|  | 1965 | if strict: | 
|  | 1966 | peer_name = node | 
|  | 1967 | else: | 
|  | 1968 | peer_name = node.split('.')[0] | 
|  | 1969 | peer_elem = out[node]["ret"] | 
|  | 1970 | for elem in peer_elem: | 
|  | 1971 | if not strict: | 
|  | 1972 | elem = elem.split('.')[0] | 
|  | 1973 | peer_elem[elem]["peer"] = peer_name | 
|  | 1974 | del(peer_elem[elem]["peer_address"]) | 
|  | 1975 | elements[elem]["peers"].append(peer_elem[elem]) | 
|  | 1976 |  | 
|  | 1977 | failed_peers = [] | 
|  | 1978 | for elem_name in elements: | 
|  | 1979 | elem = elements[elem_name] | 
|  | 1980 | if elem["type"] == "COMPUTE": | 
|  | 1981 | if len(elem["peers"]) < 2: | 
|  | 1982 | if elem not in failed_peers: | 
|  | 1983 | failed_peers.append(elem) | 
|  | 1984 | if elem["type"] == "CONTROLLER": | 
|  | 1985 | if len(elem["peers"]) < len(minions)-1: | 
|  | 1986 | if elem not in failed_peers: | 
|  | 1987 | failed_peers.append(elem) | 
|  | 1988 | if elem["type"] == "EDGE-ROUTER": | 
|  | 1989 | if len(elem["peers"]) != len(minions): | 
|  | 1990 | if elem not in failed_peers: | 
|  | 1991 | failed_peers.append(elem) | 
|  | 1992 | for peer in elem["peers"]: | 
|  | 1993 | if peer["state"] != "Established": | 
|  | 1994 | if elem not in failed_peers: | 
|  | 1995 | failed_peers.append(elem) | 
|  | 1996 |  | 
|  | 1997 | if len(failed_peers) > 0: | 
|  | 1998 | logger.error("%s check FAILED" % agent) | 
|  | 1999 | if strict: | 
|  | 2000 | logger.error("Strict mode is on. Check DNS names in output") | 
|  | 2001 | logger.error("Minions output:") | 
|  | 2002 | logger.error(json.dumps(out, indent=4)) | 
|  | 2003 | else: | 
|  | 2004 | logger.error("Failed peers:") | 
|  | 2005 | logger.error(json.dumps(failed_peers, indent=4)) | 
|  | 2006 | __context__['retcode'] = 2 | 
|  | 2007 | return False | 
|  | 2008 |  | 
|  | 2009 | if kwargs.get("debug", False): | 
|  | 2010 | logger.info(json.dumps(elements, indent=4)) | 
|  | 2011 |  | 
|  | 2012 | return True | 
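|  |  |  | 
|  |  | # Usage sketch (the minion name is an assumption; the default target is the compound match above): | 
|  |  | #   salt 'cfg01*' health_checks.contrail_mesh_check strict=False debug=True | 
|  |  | # With strict=False peer names and minion ids are shortened to their host part, so BGP/XMPP | 
|  |  | # peers only have to match on hostname rather than on the full FQDN. | 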
|  | 2013 |  | 
|  | 2014 |  | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2015 | def kafka_brokers_ids(): | 
|  | 2016 |  | 
|  | 2017 | ''' Retrieve kafka brokers ids ''' | 
|  | 2018 |  | 
|  | 2019 | brokers_ids = [] | 
|  | 2020 | for line in zookeeper_cmd('dump').split('\n'): | 
|  | 2021 | if line: | 
|  | 2022 | if '/brokers/ids/' in line: | 
|  | 2023 | brokers_ids.append(int(line.split('/')[3])) | 
|  | 2024 |  | 
|  | 2025 | return brokers_ids | 
|  | 2026 |  | 
|  | 2027 |  | 
| Dzmitry Stremkouski | 2c709f2 | 2019-04-22 02:27:54 +0200 | [diff] [blame] | 2028 | def libvirt_capabilities(): | 
|  | 2029 |  | 
|  | 2030 | ''' JSON formatted libvirt capabilities list ''' | 
|  | 2031 |  | 
|  | 2032 | import xml.etree.ElementTree as ET | 
|  | 2033 |  | 
|  |  | stdout = '' | 
|  | 2034 | try: | 
|  | 2035 | proc = subprocess.Popen(['virsh', 'capabilities'], stdout=subprocess.PIPE) | 
|  | 2036 | stdout, stderr = proc.communicate() | 
|  | 2037 | xmletree = ET.fromstring(stdout) | 
|  | 2038 | except: | 
|  | 2039 | return "Could not parse xml tree %s" % str(stdout) | 
|  | 2040 |  | 
|  | 2041 | try: | 
|  | 2042 | capabilities = {} | 
|  | 2043 | for elem in xmletree: | 
|  | 2044 | if elem.tag == "guest": | 
|  | 2045 | for el in elem: | 
|  | 2046 | if el.tag == 'arch': | 
|  | 2047 | _name = el.attrib['name'] | 
|  | 2048 | capabilities[_name] = [] | 
|  | 2049 | for arch in el: | 
|  | 2050 | if arch.tag == 'machine': | 
|  | 2051 | if 'canonical' not in arch.attrib: | 
|  | 2052 | capabilities[_name].append(arch.text) | 
|  | 2053 |  | 
|  | 2054 | return capabilities | 
|  | 2055 | except: | 
|  | 2056 | return "Unsupported xml tree for this function %s" % str(stdout) | 
|  | 2057 |  | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2058 |  | 
| Dzmitry Stremkouski | 88275d3 | 2019-07-23 19:42:42 +0200 | [diff] [blame] | 2059 | def keystone_keys_attractor(keys_dir='/var/lib/keystone/fernet-keys', keys_ids=range(0,-4,-1)): | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2060 |  | 
| Dzmitry Stremkouski | 88275d3 | 2019-07-23 19:42:42 +0200 | [diff] [blame] | 2061 | ''' JSON formatted dict of keystone keys sha256 sums ''' | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2062 |  | 
| Dzmitry Stremkouski | 88275d3 | 2019-07-23 19:42:42 +0200 | [diff] [blame] | 2063 | keys = os.listdir(keys_dir) | 
|  | 2064 | keys.sort() | 
|  | 2065 | keys_dict = {} | 
|  | 2066 | try: | 
|  | 2067 | for i in keys_ids: | 
|  | 2068 | with open("%s/%s" % (keys_dir, str(keys[i])), 'r') as key_file: | 
|  | 2069 | _iter1 = hashlib.sha256(key_file.read()).hexdigest() | 
|  | 2070 | _iter2 = hashlib.sha256(_iter1).hexdigest() | 
|  | 2071 | _iter3 = hashlib.sha256(_iter2).hexdigest() | 
|  | 2072 | keys_dict[str(keys[i])] = _iter3 | 
|  | 2073 | except: | 
|  | 2074 | pass | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2075 |  | 
| Dzmitry Stremkouski | 88275d3 | 2019-07-23 19:42:42 +0200 | [diff] [blame] | 2076 | return keys_dict | 
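|  |  |  | 
|  |  | # The "attractor" is a triple sha256 of each selected key file, presumably so that only | 
|  |  | # derived digests (never the raw fernet/credential key material) travel over the salt bus | 
|  |  | # when keystone_keys_check below compares them. The default keys_ids (0, -1, -2, -3) cover | 
|  |  | # the first and the three last entries of the sorted key directory listing. | 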
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2077 |  | 
| Dzmitry Stremkouski | a78a04d | 2019-07-13 11:05:03 +0200 | [diff] [blame] | 2078 |  | 
| Dzmitry Stremkouski | 88275d3 | 2019-07-23 19:42:42 +0200 | [diff] [blame] | 2079 | def keystone_keys_check(target='I@keystone:server', target_type='compound', ignore_dead=False, **kwargs): | 
|  | 2080 |  | 
|  | 2081 | ''' Check that keystone keys are in sync across the cluster ''' | 
|  | 2082 |  | 
|  | 2083 | keys_type = kwargs.get("keys_type", 'fernet') | 
|  | 2084 |  | 
|  | 2085 | supported_key_types = ['fernet', 'credential'] | 
|  | 2086 | if keys_type not in supported_key_types: | 
|  | 2087 | logger.error("Unsupported keys type: %s" % str(keys_type)) | 
|  | 2088 | logger.error("Supported keys type are: %s" % str(supported_key_types)) | 
|  | 2089 | __context__['retcode'] = 2 | 
|  | 2090 | return False | 
|  | 2091 |  | 
|  | 2092 | agent = "keystone %s keys sync" % keys_type | 
|  | 2093 | keys_dir_default = '/var/lib/keystone/%s-keys' % keys_type | 
|  | 2094 | keys_dir = kwargs.get("keys_dir", keys_dir_default) | 
|  | 2095 |  | 
|  | 2096 | out = __salt__['saltutil.cmd']( tgt=target, | 
|  | 2097 | tgt_type=target_type, | 
|  | 2098 | fun='health_checks.keystone_keys_attractor', | 
|  | 2099 | arg=["keys_dir='%s'" % keys_dir], | 
|  | 2100 | timeout=3 | 
|  | 2101 | ) or None | 
|  | 2102 |  | 
|  | 2103 | if not _minions_output(out, agent, ignore_dead): | 
|  | 2104 | __context__['retcode'] = 2 | 
|  | 2105 | return False | 
|  | 2106 |  | 
|  | 2107 | cluster_attractors = [] | 
|  | 2108 | failed_minions = [] | 
|  | 2109 | verified_minions = [] | 
|  | 2110 | attractor = {} | 
|  | 2111 |  | 
|  | 2112 | for minion in out: | 
|  | 2113 | verified_minions.append(minion) | 
|  | 2114 | attractor = out[minion]['ret'] | 
|  | 2115 | if attractor == {}: | 
|  | 2116 | failed_minions.append(minion) | 
|  | 2117 | if attractor not in cluster_attractors: | 
|  | 2118 | cluster_attractors.append(attractor) | 
|  | 2119 |  | 
|  | 2120 | if not _failed_minions(out, agent, failed_minions): | 
|  | 2121 | __context__['retcode'] = 2 | 
|  | 2122 | return False | 
|  | 2123 |  | 
|  | 2124 | if len(cluster_attractors) > 1: | 
|  | 2125 | failed_minions = [] | 
|  | 2126 | for minion in out: | 
|  | 2127 | failed_minions.append(minion) | 
|  | 2128 |  | 
|  | 2129 | if not _failed_minions(out, agent, failed_minions): | 
|  | 2130 | __context__['retcode'] = 2 | 
|  | 2131 | return False | 
|  | 2132 |  | 
|  | 2133 | if kwargs.get("debug", False): | 
|  | 2134 | logger.info("%s check done." % agent) | 
|  | 2135 | logger.info(verified_minions) | 
|  | 2136 |  | 
|  | 2137 | return True | 
|  | 2138 |  | 
| Dzmitry Stremkouski | 83b9c16 | 2019-09-25 09:41:45 +0200 | [diff] [blame] | 2139 |  | 
|  | 2140 | def list_namespaces(raw_output=False): | 
|  | 2141 |  | 
|  | 2142 | ''' JSON formatted ip netns list ''' | 
|  | 2143 |  | 
|  | 2144 | proc = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE) | 
|  | 2145 | stdout, stderr = proc.communicate() | 
|  | 2146 |  | 
|  | 2147 | namespaces = [] | 
|  | 2148 |  | 
|  | 2149 | for line in stdout.split('\n'): | 
|  | 2150 | if len(line) > 0: | 
|  | 2151 | netns = {} | 
|  | 2152 | if raw_output: | 
|  | 2153 | netns['id'] = -2 | 
|  | 2154 | netns['uuid'] = line | 
|  | 2155 | else: | 
|  | 2156 | line_splitted = line.split() | 
|  | 2157 | if len(line_splitted) > 1: | 
|  | 2158 | ns_uuid = line_splitted[0] | 
|  | 2159 | ns_id = int(line_splitted[2][:-1]) | 
|  | 2160 | else: | 
|  | 2161 | ns_uuid = line | 
|  | 2162 | ns_id = -1 | 
|  | 2163 | netns['id'] = ns_id | 
|  | 2164 | netns['uuid'] = ns_uuid | 
|  | 2165 | namespaces.append(netns) | 
|  | 2166 |  | 
|  | 2167 | return namespaces | 
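|  |  |  | 
|  |  | # Illustrative output for an `ip netns` line like 'qrouter-42 (id: 5)' (namespace name assumed): | 
|  |  | #   [{'id': 5, 'uuid': 'qrouter-42'}] | 
|  |  | # Namespaces without an '(id: N)' suffix get id -1; with raw_output=True the raw line is | 
|  |  | # returned as 'uuid' and id is set to -2. | 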
| Dzmitry Stremkouski | 1ce1b35 | 2019-11-02 10:47:14 +0100 | [diff] [blame^] | 2168 |  | 
|  | 2169 |  | 
|  | 2170 | def _load_mysql_module(): | 
|  | 2171 |  | 
|  | 2172 | # Check if the MySQLdb module is already loaded. | 
|  | 2173 | # It can be loaded by a parent function; | 
|  | 2174 | # in case of a direct function call, we load it here. | 
|  | 2175 | mod_not_loaded = False | 
|  | 2176 | try: | 
|  | 2177 | dir(MySQLdb) | 
|  | 2178 | except: | 
|  | 2179 | # Not loaded. Trying to load | 
|  | 2180 | mod_not_loaded = True | 
|  | 2181 |  | 
|  | 2182 | if mod_not_loaded: | 
|  | 2183 | try: | 
|  | 2184 | import MySQLdb | 
|  | 2185 | except: | 
|  | 2186 | logger.error("Python library MySQLdb could not be loaded. Install it first") | 
|  | 2187 | __context__['retcode'] = 2 | 
|  | 2188 | return False | 
|  | 2189 |  | 
|  | 2190 | return MySQLdb | 
|  | 2191 |  | 
|  | 2192 |  | 
|  | 2193 | def get_keystone_db_versions(db_host, db_user, db_pass, db_name="keystone"): | 
|  | 2194 |  | 
|  | 2195 | ''' Return dict of keystone DB versions ''' | 
|  | 2196 |  | 
|  | 2197 | MySQLdb = _load_mysql_module() | 
|  | 2198 | if not MySQLdb: | 
|  | 2199 | return MySQLdb | 
|  | 2200 |  | 
|  | 2201 | keystone_versions = {} | 
|  | 2202 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2203 | cursor = db.cursor() | 
|  | 2204 |  | 
|  | 2205 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'keystone'") | 
|  | 2206 | if cursor.rowcount == 1: | 
|  | 2207 | keystone_db_ver = cursor.fetchone()[0] | 
|  | 2208 | else: | 
|  | 2209 | keystone_db_ver = 0 | 
|  | 2210 |  | 
|  |  | keystone_data_ver = 0 | 
|  | 2211 | if keystone_db_ver > 0: | 
|  | 2212 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'keystone_data_migrate'") | 
|  | 2213 | if cursor.rowcount == 1: | 
|  | 2214 | keystone_data_ver = cursor.fetchone()[0] | 
|  | 2215 | else: | 
|  | 2216 | keystone_data_ver = 0 | 
|  | 2217 | db.close() | 
|  | 2218 |  | 
|  | 2219 | keystone_versions['db'] = keystone_db_ver | 
|  | 2220 | keystone_versions['data_db'] = keystone_data_ver | 
|  | 2221 | for release in db_ver_map: | 
|  | 2222 | keystone_obj = db_ver_map[release]["keystone"] | 
|  | 2223 | if isinstance(keystone_obj, dict): | 
|  | 2224 | keystone_db_map = int(keystone_obj["db"]) | 
|  | 2225 | keystone_data_map = int(keystone_obj["data"]) | 
|  | 2226 | if keystone_db_map == keystone_db_ver and keystone_data_map == keystone_data_ver: | 
|  | 2227 | keystone_versions['os_release'] = release | 
|  | 2228 | else: | 
|  | 2229 | keystone_db_map = int(keystone_obj) | 
|  | 2230 | if keystone_db_map == keystone_db_ver: | 
|  | 2231 | keystone_versions['os_release'] = release | 
|  | 2232 |  | 
|  | 2233 | return keystone_versions | 
|  | 2234 |  | 
|  | 2235 |  | 
|  | 2236 | def get_glance_db_versions(db_host, db_user, db_pass, db_name="glance"): | 
|  | 2237 |  | 
|  | 2238 | ''' Return dict of glance DB versions ''' | 
|  | 2239 |  | 
|  | 2240 | MySQLdb = _load_mysql_module() | 
|  | 2241 | if not MySQLdb: | 
|  | 2242 | return MySQLdb | 
|  | 2243 |  | 
|  | 2244 | glance_versions = {} | 
|  | 2245 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2246 | cursor = db.cursor() | 
|  | 2247 |  | 
|  | 2248 | glance_release = "unknown" | 
|  | 2249 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'Glance Migrations'") | 
|  | 2250 | if cursor.rowcount == 1: | 
|  | 2251 | glance_db_ver = cursor.fetchone()[0] | 
|  | 2252 | for release in db_ver_map: | 
|  | 2253 | glance_obj = db_ver_map[release]["glance"] | 
|  | 2254 | glance_db_map = int(glance_obj) | 
|  | 2255 | if glance_db_map == glance_db_ver: | 
|  | 2256 | glance_release = release | 
|  | 2257 | else: | 
|  | 2258 | glance_db_ver = 0 | 
|  | 2259 | db.close() | 
|  | 2260 |  | 
|  | 2261 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2262 | cursor = db.cursor() | 
|  | 2263 | try: | 
|  | 2264 | cursor.execute("select `version_num` from `alembic_version`") | 
|  | 2265 | except: | 
|  | 2266 | pass | 
|  | 2267 | if cursor.rowcount == 1: | 
|  | 2268 | glance_release = cursor.fetchone()[0] | 
|  | 2269 | for release in db_ver_map: | 
|  | 2270 | if release in glance_release: | 
|  | 2271 | glance_db_ver = db_ver_map[release]["glance"] | 
|  | 2272 | glance_release = "%s (alembic)" % release | 
|  | 2273 | db.close() | 
|  | 2274 |  | 
|  | 2275 | glance_versions['db'] = glance_db_ver | 
|  | 2276 | glance_versions['os_release'] = glance_release | 
|  | 2277 |  | 
|  | 2278 | return glance_versions | 
|  | 2279 |  | 
|  | 2280 |  | 
|  | 2281 | def get_cinder_db_versions(db_host, db_user, db_pass, db_name="cinder"): | 
|  | 2282 |  | 
|  | 2283 | ''' Return dict of cinder DB versions ''' | 
|  | 2284 |  | 
|  | 2285 | MySQLdb = _load_mysql_module() | 
|  | 2286 | if not MySQLdb: | 
|  | 2287 | return MySQLdb | 
|  | 2288 |  | 
|  | 2289 | cinder_versions = {} | 
|  | 2290 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2291 | cursor = db.cursor() | 
|  | 2292 |  | 
|  | 2293 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'cinder'") | 
|  | 2294 | if cursor.rowcount == 1: | 
|  | 2295 | cinder_db_ver = cursor.fetchone()[0] | 
|  | 2296 | else: | 
|  | 2297 | cinder_db_ver = 0 | 
|  | 2298 | db.close() | 
|  | 2299 |  | 
|  | 2300 | cinder_release = "" | 
|  | 2301 | for release in db_ver_map: | 
|  | 2302 | cinder_obj = db_ver_map[release]["cinder"] | 
|  | 2303 | cinder_db_map = int(cinder_obj) | 
|  | 2304 | if cinder_db_map == cinder_db_ver: | 
|  | 2305 | cinder_release = release | 
|  | 2306 |  | 
|  | 2307 | cinder_versions['db'] = cinder_db_ver | 
|  | 2308 | cinder_versions['os_release'] = cinder_release | 
|  | 2309 |  | 
|  | 2310 | return cinder_versions | 
|  | 2311 |  | 
|  | 2312 |  | 
|  | 2313 | def get_heat_db_versions(db_host, db_user, db_pass, db_name="heat"): | 
|  | 2314 |  | 
|  | 2315 | ''' Return dict of heat DB versions ''' | 
|  | 2316 |  | 
|  | 2317 | MySQLdb = _load_mysql_module() | 
|  | 2318 | if not MySQLdb: | 
|  | 2319 | return MySQLdb | 
|  | 2320 |  | 
|  | 2321 | heat_versions = {} | 
|  | 2322 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2323 | cursor = db.cursor() | 
|  | 2324 |  | 
|  | 2325 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'heat'") | 
|  | 2326 | if cursor.rowcount == 1: | 
|  | 2327 | heat_db_ver = cursor.fetchone()[0] | 
|  | 2328 | else: | 
|  | 2329 | heat_db_ver = 0 | 
|  | 2330 | db.close() | 
|  | 2331 |  | 
|  | 2332 | heat_release = "" | 
|  | 2333 | for release in db_ver_map: | 
|  | 2334 | heat_obj = db_ver_map[release]["heat"] | 
|  | 2335 | heat_db_map = int(heat_obj) | 
|  | 2336 | if heat_db_map == heat_db_ver: | 
|  | 2337 | heat_release = release | 
|  | 2338 |  | 
|  | 2339 | heat_versions['db'] = heat_db_ver | 
|  | 2340 | heat_versions['os_release'] = heat_release | 
|  | 2341 |  | 
|  | 2342 | return heat_versions | 
|  | 2343 |  | 
|  | 2344 |  | 
|  | 2345 | def get_neutron_db_versions(db_host, db_user, db_pass, db_name="neutron"): | 
|  | 2346 |  | 
|  | 2347 | ''' Return dict of neutron DB versions ''' | 
|  | 2348 |  | 
|  | 2349 | MySQLdb = _load_mysql_module() | 
|  | 2350 | if not MySQLdb: | 
|  | 2351 | return MySQLdb | 
|  | 2352 |  | 
|  | 2353 | neutron_versions = {} | 
|  | 2354 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2355 | cursor = db.cursor() | 
|  | 2356 |  | 
|  | 2357 | try: | 
|  | 2358 | cursor.execute("select `version_num` from `alembic_version`") | 
|  | 2359 | neutron_db_versions_raw = cursor.fetchall() | 
|  | 2360 | neutron_db_versions = [] | 
|  | 2361 | for el in neutron_db_versions_raw: | 
|  | 2362 | neutron_db_versions.append(el[0]) | 
|  | 2363 | except: | 
|  | 2364 | neutron_db_versions = [] | 
|  | 2365 |  | 
|  | 2366 | db.close() | 
|  | 2367 |  | 
|  | 2368 | neutron_release = "unknown (no marker found)" | 
|  | 2369 | for release in db_ver_map: | 
|  | 2370 | for commit_id in neutron_db_versions: | 
|  | 2371 | if commit_id in db_ver_map[release]["neutron"]: | 
|  | 2372 | neutron_release = release | 
|  | 2373 |  | 
|  | 2374 | neutron_versions['db_versions'] = neutron_db_versions | 
|  | 2375 | neutron_versions['os_release'] = neutron_release | 
|  | 2376 |  | 
|  | 2377 | return neutron_versions | 
|  | 2378 |  | 
|  | 2379 |  | 
|  | 2380 | def get_nova_db_versions(db_host, db_user, db_pass, db_name="nova", db_api_name="nova_api", db_api_pass=""): | 
|  | 2381 |  | 
|  | 2382 | ''' Return dict of nova DB versions ''' | 
|  | 2383 |  | 
|  | 2384 | MySQLdb = _load_mysql_module() | 
|  | 2385 | if not MySQLdb: | 
|  | 2386 | return MySQLdb | 
|  | 2387 |  | 
|  | 2388 | if not db_api_pass: | 
|  | 2389 | db_api_pass = db_pass | 
|  | 2390 |  | 
|  | 2391 | nova_versions = {} | 
|  | 2392 | db = MySQLdb.connect(db_host, db_user, db_pass, db_name) | 
|  | 2393 | cursor = db.cursor() | 
|  | 2394 |  | 
|  | 2395 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'nova'") | 
|  | 2396 | if cursor.rowcount == 1: | 
|  | 2397 | nova_db_ver = cursor.fetchone()[0] | 
|  | 2398 | else: | 
|  | 2399 | nova_db_ver = 0 | 
|  | 2400 | db.close() | 
|  | 2401 |  | 
|  | 2402 | db = MySQLdb.connect(db_host, db_user, db_api_pass, db_api_name) | 
|  | 2403 | cursor = db.cursor() | 
|  |  | nova_apidb_ver = 0 | 
|  | 2404 | if nova_db_ver > 0: | 
|  | 2405 | cursor.execute("select `version` from `migrate_version` where `repository_id` like 'nova_api'") | 
|  | 2406 | if cursor.rowcount == 1: | 
|  | 2407 | nova_apidb_ver = cursor.fetchone()[0] | 
|  | 2408 | else: | 
|  | 2409 | nova_apidb_ver = 0 | 
|  | 2410 | db.close() | 
|  | 2411 |  | 
|  | 2412 | nova_versions['db'] = nova_db_ver | 
|  | 2413 | nova_versions['api_db'] = nova_apidb_ver | 
|  | 2414 | for release in db_ver_map: | 
|  | 2415 | nova_obj = db_ver_map[release]["nova"] | 
|  | 2416 | if isinstance(nova_obj, dict): | 
|  | 2417 | nova_db_map = int(nova_obj["db"]) | 
|  | 2418 | nova_apidb_map = int(nova_obj["api_db"]) | 
|  | 2419 | if nova_db_map == nova_db_ver and nova_apidb_map == nova_apidb_ver: | 
|  | 2420 | nova_versions['os_release'] = release | 
|  | 2421 | else: | 
|  | 2422 | nova_db_map = int(nova_obj) | 
|  | 2423 | if nova_db_map == nova_db_ver: | 
|  | 2424 | nova_versions['os_release'] = release | 
|  | 2425 |  | 
|  | 2426 | return nova_versions | 
|  | 2427 |  | 
|  | 2428 |  | 
|  | 2429 | def list_db_versions(): | 
|  | 2430 |  | 
|  | 2431 | ''' Retrieve openstack DB release codenames ''' | 
|  | 2432 |  | 
|  | 2433 | db_host = str(__salt__['pillar.get']('_param:openstack_database_address')) | 
|  | 2434 | cinder_db_pass = str(__salt__['pillar.get']('_param:mysql_cinder_password')) | 
|  | 2435 | glance_db_pass = str(__salt__['pillar.get']('_param:mysql_glance_password')) | 
|  | 2436 | heat_db_pass = str(__salt__['pillar.get']('_param:mysql_heat_password')) | 
|  | 2437 | keystone_db_pass = str(__salt__['pillar.get']('_param:mysql_keystone_password')) | 
|  | 2438 | neutron_db_pass = str(__salt__['pillar.get']('_param:mysql_neutron_password')) | 
|  | 2439 | nova_db_pass = str(__salt__['pillar.get']('_param:mysql_nova_password')) | 
|  | 2440 | cinder_db_user = str(__salt__['pillar.get']('_param:mysql_cinder_username')) | 
|  | 2441 | glance_db_user = str(__salt__['pillar.get']('_param:mysql_glance_username')) | 
|  | 2442 | heat_db_user = str(__salt__['pillar.get']('_param:mysql_heat_username')) | 
|  | 2443 | keystone_db_user = str(__salt__['pillar.get']('_param:mysql_keystone_username')) | 
|  | 2444 | neutron_db_user = str(__salt__['pillar.get']('_param:mysql_neutron_username')) | 
|  | 2445 | nova_db_user = str(__salt__['pillar.get']('_param:mysql_nova_username')) | 
|  | 2446 |  | 
|  | 2447 | os_db_releases = {} | 
|  | 2448 | os_db_releases['cinder'] = get_cinder_db_versions(db_host, cinder_db_user, cinder_db_pass) | 
|  | 2449 | os_db_releases['glance'] = get_glance_db_versions(db_host, glance_db_user, glance_db_pass) | 
|  | 2450 | os_db_releases['heat'] = get_heat_db_versions(db_host, heat_db_user, heat_db_pass) | 
|  | 2451 | os_db_releases['neutron'] = get_neutron_db_versions(db_host, neutron_db_user, neutron_db_pass) | 
|  | 2452 | os_db_releases['keystone'] = get_keystone_db_versions(db_host, keystone_db_user, keystone_db_pass) | 
|  | 2453 | os_db_releases['nova'] = get_nova_db_versions(db_host, nova_db_user, nova_db_pass) | 
|  | 2454 |  | 
|  | 2455 | return os_db_releases |
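|  |  |  | 
|  |  | # Usage sketch (run on a minion that has the _param:mysql_* and database address pillars, | 
|  |  | # e.g. an OpenStack controller; the target glob is an assumption): | 
|  |  | #   salt 'ctl01*' health_checks.list_db_versions | 
|  |  | # Returns, per service, the schema version(s) found in the database and the matching | 
|  |  | # OpenStack release codename from db_ver_map. | 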