blob: 68a54262720c9d958ee256a82ff18124b9c947a7 [file] [log] [blame]
Dzmitry Stremkouskif1bcbb52019-04-11 15:48:24 +02001import requests
Dzmitry Stremkouskib71ada92019-04-05 22:37:59 +02002import subprocess
3import socket
4import salt.utils
5import logging
6import os
7import re
8import json
Dzmitry Stremkouski36290202019-05-05 21:26:25 +02009import yaml
Dzmitry Stremkouskib71ada92019-04-05 22:37:59 +020010
11__author__ = "Dzmitry Stremkouski"
12__copyright__ = "Copyright 2019, Mirantis Inc."
13__license__ = "Apache 2.0"
14
15logger = logging.getLogger(__name__)
16stream = logging.StreamHandler()
17logger.addHandler(stream)
18
Dzmitry Stremkouski36290202019-05-05 21:26:25 +020019try:
20 from yaml import CLoader as Loader, CDumper as Dumper
21except ImportError:
22 from yaml import Loader, Dumper
23
# Default map of Contrail vRouter UVE sections to the fields extracted from
# them. Keys are UVE structure names; each list entry is a ':'-separated path
# into the nested data, where '*' matches any key at that level.
# NOTE(review): presumably consumed by vrouter-info helpers elsewhere in this
# module — confirm against the rest of the file.
default_vrouter_info_map = yaml.load("""
ContrailConfig:
- deleted
- elements:uuid
- elements:virtual_router_dpdk_enabled
- elements:virtual_router_type
VrouterAgent:
- build_info:build-info:0:build-version
- build_info:build-info:0:build-number
- config_file
- control_ip
- control_node_list_cfg
- dns_server_list_cfg
- dns_servers
- down_interface_count
- eth_name
- headless_mode_cfg
- hostname_cfg
- hypervisor
- mode
- phy_if
- platform
- self_ip_list
- total_interface_count
- tunnel_type
- vhost_cfg
- vhost_if
- vr_limits:max_interfaces
- vr_limits:max_labels
- vr_limits:max_mirror_entries
- vr_limits:max_nexthops
- vr_limits:max_vrfs
- vr_limits:vrouter_max_bridge_entries
- vr_limits:vrouter_max_flow_entries
- vr_limits:vrouter_max_oflow_bridge_entries
- vr_limits:vrouter_max_oflow_entries
- xmpp_peer_list:*:ip
- xmpp_peer_list:*:primary
- xmpp_peer_list:*:status
""", Loader=Loader)

# Default set of peer attributes kept when filtering peer information.
default_peer_filter = ["encoding", "peer_address", "state"]
66
Dzmitry Stremkouskib71ada92019-04-05 22:37:59 +020067
68def _failed_minions(out, agent, failed_minions):
69
70 ''' Verify failed minions '''
71
72 if len(failed_minions) > 0:
73 logger.error("%s check FAILED" % agent)
74 logger.error("Some minions returned non-zero exit code or empty data")
75 logger.error("Failed minions:" + str(failed_minions))
76 for minion in failed_minions:
77 logger.error(minion)
78 logger.debug(str(out[minion]['ret']))
79 __context__['retcode'] = 2
80 return False
81
82 return True
83
84
def _minions_output(out, agent, ignore_dead, ignore_empty=False):

    ''' Verify minions output and exit code.

    :param out: saltutil.cmd result dict keyed by minion id
    :param agent: human-readable check name used in log messages
    :param ignore_dead: skip master-side verification of offline minions
    :param ignore_empty: do not treat empty or boolean returns as failures
    :return: True when all targeted minions responded successfully
    '''

    if not out:
        logger.error("%s check FAILED" % agent)
        logger.error("No response from master cmd")
        __context__['retcode'] = 2
        return False

    if not ignore_dead:
        # BUG FIX: dict.itervalues() exists only on Python 2; use
        # next(iter(...)) which works on both Python 2 and 3.
        jid = next(iter(out.values()))['jid']
        job_stats = __salt__['saltutil.runner']( 'jobs.print_job', arg=[jid] ) or None
        if not job_stats:
            logger.error("%s check FAILED" % agent)
            logger.error("No response from master runner")
            __context__['retcode'] = 2
            return False

        # Every targeted minion must appear in the job result, otherwise
        # some of them are offline.
        job_result = job_stats[jid]['Result']
        job_minions = job_stats[jid]['Minions']
        if len(job_minions) != len(job_result):
            logger.error("%s check FAILED" % agent)
            logger.error("Some minions are offline")
            logger.error(list(set(job_minions) - set(job_result.keys())))
            __context__['retcode'] = 2
            return False

    failed_minions = []
    for minion in out:
        if 'retcode' in out[minion]:
            if out[minion]['retcode'] == 0:
                if not ignore_empty:
                    # A bool or empty return means the module produced no
                    # usable data even though it exited cleanly.
                    if isinstance(out[minion]['ret'], bool):
                        if minion not in failed_minions:
                            failed_minions.append(minion)
                    elif len(out[minion]['ret']) == 0:
                        if minion not in failed_minions:
                            failed_minions.append(minion)
            else:
                if minion not in failed_minions:
                    failed_minions.append(minion)
        else:
            # No retcode at all -> treat as failed.
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    return True
136
137
def minions_check(wait_timeout=1, gather_job_wait_timeout=1, target='*', target_type='glob', ignore_dead=False):

    ''' Verify minions are online '''

    agent = "Minions"
    # A plain test.ping over the target set; empty returns are fine here.
    ping_result = __salt__['saltutil.cmd'](
        tgt=target,
        tgt_type=target_type,
        fun='test.ping',
        timeout=wait_timeout,
        gather_job_timeout=gather_job_wait_timeout
    ) or None

    return _minions_output(ping_result, agent, ignore_dead, ignore_empty=True)
151
152
def time_diff_check(time_diff=1, target='*', target_type='glob', ignore_dead=False, **kwargs):

    ''' Verify time diff on servers '''

    agent = "Time diff"
    # '%s' is passed to status.time as the format string; the returned
    # values are int()-converted below, so they are epoch-like integers.
    out = __salt__['saltutil.cmd'](
        tgt=target,
        tgt_type=target_type,
        fun='status.time',
        arg=['%s'],
        timeout=3
    ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    minions_times = {}
    env_times = []
    verified_minions = []

    for minion in out:
        verified_minions.append(minion)
        if out[minion]['retcode'] != 0:
            continue
        epoch = int(out[minion]['ret'])
        minions_times.setdefault(str(epoch), []).append(minion)
        env_times.append(epoch)

    # Drift is the spread between the slowest and fastest clock.
    env_times.sort()
    drift = env_times[-1] - env_times[0]

    if drift > time_diff:
        __context__['retcode'] = 2
        if kwargs.get("debug", False):
            return False, minions_times
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True
195
196
def contrail_check(target='I@opencontrail:control or I@opencontrail:collector or I@opencontrail:compute', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify contrail status returns nothing critical '''

    agent = "Contrail status"
    out = __salt__['saltutil.cmd'](
        tgt=target,
        tgt_type=target_type,
        fun='cmd.run',
        arg=['contrail-status'],
        timeout=5
    ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    # A line is healthy when it is empty, a '==' section header, or a
    # service in an acceptable state.
    healthy_line = re.compile('^(==|$|\S+\s+(active|backup|inactive\s\(disabled\son\sboot\)))')

    validated = []
    for minion in out:
        unhealthy = [ln for ln in out[minion]['ret'].split('\n') if not healthy_line.match(ln)]
        if unhealthy and minion not in failed_minions:
            failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(validated)
    return True
231
232
def galera_check(cluster_size=3, target='I@galera:master or I@galera:slave', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify galera cluster size and state '''

    agent = "Galera status"
    out = __salt__['saltutil.cmd'](
        tgt=target,
        tgt_type=target_type,
        fun='mysql.status',
        timeout=3
    ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    validated = []
    for minion in out:
        status = out[minion]['ret']
        # Each node must see the expected cluster size and be OPERATIONAL.
        size_ok = int(status['wsrep_cluster_size']) == int(cluster_size)
        state_ok = status['wsrep_evs_state'] == 'OPERATIONAL'
        if not (size_ok and state_ok) and minion not in failed_minions:
            failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(validated)
        logger.info("Cluster size: " + str(out[validated[0]]['ret']['wsrep_cluster_size']))
        logger.info("Cluster state: " + str(out[validated[0]]['ret']['wsrep_evs_state']))
    return True
267
268
def _quote_str(s, l=False, r=False):

    ''' Quting rabbitmq erl objects for json import

    :param s: single erlang token (as produced by splitting output on ',')
    :param l: caller stripped leading brackets/whitespace off this token
    :param r: caller stripped trailing brackets/whitespace off this token
    :return: token quoted for JSON; single quotes become double quotes
    '''

    if len(s) > 0:
        if l:
            s = s.lstrip()
        if r:
            s = s.rstrip()
        # Token starts with a quote but does not end with one — presumably a
        # quoted value that was split on an inner ',': close the quote.
        if (s[0] == "'") and (s[-1] != "'") and r and not l:
            s += "'"
        if (s[0] == '"') and (s[-1] != '"') and r and not l:
            s += '"'
        # Mirror case: token ends with a quote but does not start with one.
        if (s[-1] == "'") and (s[0] != "'") and l and not r:
            s = "'" + s
        if (s[-1] == '"') and (s[0] != '"') and l and not r:
            s = '"' + s
        # Completely unquoted token: wrap in double quotes, escaping any
        # embedded double quotes first.
        if (s[-1] != "'") and (s[-1] != '"') and (s[0] != "'") and (s[0] != '"'):
            s = '"' + s.replace('"', '\\\"') + '"'
        else:
            # NOTE(review): 'not s[-1] != '"'' is a double negative, i.e.
            # s[-1] == '"'. Looks deliberate but worth confirming.
            if (not l) and (not r) and s[0] != '"' and not s[-1] != '"':
                s= s.replace('"', '\\\"')
        return s.replace("'", '"')
    else:
        # Empty input passes through unchanged.
        return s
294
295
def _sanitize_rmqctl_output(string):

    ''' Sanitizing rabbitmq erl objects for json import

    Rewrites rabbitmqctl's erlang-term text (lists/tuples/proplists) into a
    string that json.loads can parse.

    :param string: raw erlang term text, e.g. a 'cluster_status' dump
    :return: JSON-formatted text
    '''

    rabbitctl_json = ""
    # Tokenize on ',' and re-quote each token while preserving any leading
    # '[', '{' and trailing ']', '}' bracket runs around it.
    for line in string.split(','):
        copy = line
        left = ""
        right = ""
        mid = copy
        lpar = False
        rpar = False
        # Peel leading brackets/whitespace off the token.
        if re.search('([\[\{\s]+)(.*)', copy):
            mid = re.sub('^([\[\{\s]+)','', copy)
            left = copy[:-len(mid)]
            copy = mid
            lpar = True
        # Peel trailing brackets/whitespace off the token.
        if re.search('(.*)([\]\}\s]+)$', copy):
            mid = re.sub('([\]\}\s]+)$','', copy)
            right = copy[len(mid):]
            copy = mid
            rpar = True
        result = left + _quote_str(mid, l=lpar, r=rpar) + right
        # A token opening a '{' starts an erlang 2-tuple: emit ':' so it
        # becomes a JSON key/value pair; otherwise keep the ',' separator.
        if (not rpar) and lpar and (len(left.strip()) > 0) and (left.strip()[-1] == '{'):
            result += ":"
        else:
            result += ","
        rabbitctl_json += result

    # Drop the separator appended after the final token.
    rabbitctl_json = rabbitctl_json[:-1]
    new_rabbitctl_json = rabbitctl_json
    # Escape stray quotes/backslashes inside already-quoted string values so
    # the final document stays valid JSON.
    for s in re.findall('"[^:\[{\]}]+"\s*:\s*("[^\[{\]}]+")', rabbitctl_json):
        if '"' in s[1:][:-1]:
            orig = s
            changed = '"' + s.replace('\\', '\\\\').replace('"', '\\\"') + '"'
            new_rabbitctl_json = new_rabbitctl_json.replace(orig, changed)
    return new_rabbitctl_json
333
334
def rabbitmq_list_queues(vhost='/'):

    ''' JSON formatted RabbitMQ queues list '''

    output = subprocess.Popen(['rabbitmqctl', 'list_queues' , '-p', vhost], stdout=subprocess.PIPE).communicate()[0]

    queues = {}
    for row in output.split('\n'):
        # Only rows ending in a digit carry "<queue> <depth>" data; banner
        # lines are skipped.
        if re.findall('[0-9]$', row):
            name, depth = re.sub(r"\s+", " ", row).split()
            queues[name] = int(depth)

    return queues
349
350
def rabbitmq_list_vhosts():

    ''' JSON formatted RabbitMQ vhosts list '''

    output = subprocess.Popen(['rabbitmqctl', 'list_vhosts'], stdout=subprocess.PIPE).communicate()[0]

    # vhost names begin with '/'; any other line is banner text.
    return [row for row in output.split('\n') if re.findall('^/', row)]
364
365
def rabbitmq_cmd(cmd):

    ''' JSON formatted RabbitMQ command output '''

    supported_commands = ['status', 'cluster_status', 'list_hashes', 'list_ciphers']
    if cmd not in supported_commands:
        logger.error("Command is not supported yet, sorry")
        logger.error("Supported commands are: " + str(supported_commands))
        __context__['retcode'] = 2
        return False

    stdout = subprocess.Popen(['rabbitmqctl', cmd], stdout=subprocess.PIPE).communicate()[0]

    # Keep only the erlang term between the first '[' and the last ']',
    # drop newlines, then convert it to JSON.
    start = int(stdout.find('['))
    end = int(stdout.rfind(']')) + 1
    rabbitmqctl_cutoff = stdout[start:end].replace('\n','')
    return json.loads(_sanitize_rmqctl_output(rabbitmqctl_cutoff))
382
383
def rabbitmq_check(target='I@rabbitmq:server', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify rabbit cluster and it's alarms

    :param target: salt targeting expression for rabbitmq servers
    :param target_type: salt targeting type
    :param ignore_dead: skip verification of offline minions
    :return: True when all nodes are running with no alarms
    '''

    agent = "RabbitMQ status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.rabbitmq_cmd',
                                    arg=['cluster_status'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []

    for minion in out:
        rabbitmqctl_json = out[minion]['ret']
        running_nodes = []
        available_nodes = []
        alarms = []
        for el in rabbitmqctl_json:
            if 'alarms' in el:
                alarms = el['alarms']
            if 'nodes' in el:
                available_nodes = el['nodes'][0]['disc']
            if 'running_nodes' in el:
                running_nodes = el['running_nodes']

        # BUG FIX: list.sort() returns None, so the previous comparison
        # 'running_nodes.sort() == available_nodes.sort()' was always True
        # and missing cluster members were never detected. Compare sorted
        # copies instead.
        if sorted(running_nodes) == sorted(available_nodes):
            # All nodes are up; now check that none of them has alarms.
            nodes_alarms = []
            for node in running_nodes:
                for el in alarms:
                    if node in el:
                        if len(el[node]) > 0:
                            nodes_alarms.append(el[node])
            if len(nodes_alarms) > 0:
                failed_minions.append(minion)
        else:
            failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(running_nodes)
    return True
434
435
def haproxy_status(socket_path='/run/haproxy/admin.sock', buff_size = 8192, encoding = 'UTF-8', stats_filter=[]):

    ''' JSON formatted haproxy status

    :param socket_path: path to the haproxy admin unix socket
    :param buff_size: socket receive buffer size
    :param encoding: text encoding of the socket protocol
    :param stats_filter: when non-empty, keep only these stat fields
                         (never mutated, so the mutable default is safe)
    :return: nested dict {service: {FRONTEND/BACKEND/UPSTREAM: {...}}},
             or False when the socket is missing
    '''

    if not os.path.exists(socket_path):
        logger.error('Socket %s does not exist or haproxy not running' % socket_path)
        __context__['retcode'] = 2
        return False

    client = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(socket_path)
    # FIX: the command was previously assigned twice; once is enough.
    stat_cmd = 'show stat\n'

    client.send(bytearray(stat_cmd, encoding))
    output = client.recv(buff_size)

    # Drain the socket until haproxy closes the stream.
    res = ""
    while output:
        res += output.decode(encoding)
        output = client.recv(buff_size)
    client.close()

    haproxy_stats = {}
    res_list = res.split('\n')
    # The header line starts with '# ', hence the [2:] slice.
    fields = res_list[0][2:].split(',')
    stats_list = []
    for line in res_list[1:]:
        if len(line.strip()) > 0:
            stats_list.append(line)

    for line in stats_list:
        # PERF FIX: split each CSV row once instead of once per field.
        values = line.split(',')
        element = {}
        for idx, field in enumerate(fields):
            element[field] = values[idx]
        server_name = element.pop('pxname')
        server_type = element.pop('svname')
        if stats_filter:
            element = {k: v for k, v in element.items() if k in stats_filter}
        if server_name not in haproxy_stats:
            haproxy_stats[server_name] = {}
        if server_type == "FRONTEND" or server_type == "BACKEND":
            haproxy_stats[server_name][server_type] = element
        else:
            # Anything else is an individual backend server ("upstream").
            if 'UPSTREAM' not in haproxy_stats[server_name]:
                haproxy_stats[server_name]['UPSTREAM'] = {}
            haproxy_stats[server_name]['UPSTREAM'][server_type] = element

    return haproxy_stats
490
491
def haproxy_check(target='I@haproxy:proxy', target_type='compound', ignore_dead=False, ignore_services=[], ignore_upstreams=[], ignore_no_upstream=False, **kwargs):

    ''' Verify haproxy backends status '''

    agent = "haproxy status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.haproxy_status',
                                    arg=["stats_filter=['status']"],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []

    def _mark_failed(minion):
        # Record each failing minion at most once.
        if minion not in failed_minions:
            failed_minions.append(minion)

    for minion in out:
        verified_minions.append(minion)
        haproxy_json = out[minion]['ret']
        for service in haproxy_json:
            if service in ignore_services:
                continue
            if haproxy_json[service]['FRONTEND']['status'] != 'OPEN':
                _mark_failed(minion)
            if haproxy_json[service]['BACKEND']['status'] != 'UP':
                _mark_failed(minion)
            if 'UPSTREAM' in haproxy_json[service]:
                for upstream in haproxy_json[service]['UPSTREAM']:
                    if upstream not in ignore_upstreams:
                        if haproxy_json[service]['UPSTREAM'][upstream]['status'] != 'UP':
                            _mark_failed(minion)
            elif not ignore_no_upstream:
                # A service without any upstream servers counts as broken
                # unless explicitly allowed.
                _mark_failed(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True
539
540
def df_check(target='*', target_type='glob', verify='space', space_limit=80, inode_limit=80, ignore_dead=False, ignore_partitions=[], **kwargs):

    ''' Verify storage space/inodes status '''

    supported_options = ['space', 'inodes']
    if verify not in supported_options:
        logger.error('Unsupported "verify" option.')
        logger.error('Supported options are: %s' % str(supported_options))
        __context__['retcode'] = 2
        return False

    # Map the verification mode onto the salt module, the field in its
    # output and the configured threshold.
    if verify == 'space':
        fun_cmd, json_arg, limit = 'disk.usage', 'capacity', space_limit
    else:
        fun_cmd, json_arg, limit = 'disk.inodeusage', 'use', inode_limit

    agent = "df status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun=fun_cmd,
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        df_json = out[minion]['ret']
        for disk in df_json:
            if disk in ignore_partitions:
                continue
            # Value is presumably a percentage like '42%': strip the last
            # character before comparing against the limit.
            if int(df_json[disk][json_arg][:-1]) > int(limit) and minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True
590
591
def load_check(target='*', target_type='glob', la1=3, la5=3, la15=3, ignore_dead=False, **kwargs):

    ''' Verify load average status '''

    agent = "load average status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.loadavg',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    # Thresholds paired with their keys in the loadavg return.
    thresholds = [('1-min', la1), ('5-min', la5), ('15-min', la15)]

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        la_json = out[minion]['ret']
        for window, limit in thresholds:
            if float(la_json[window]) > float(limit) and minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True
629
630
def netdev_check(target='*', target_type='glob', rx_drop_limit=0, tx_drop_limit=0, ignore_devices=[], ignore_dead=False, **kwargs):

    ''' Verify netdev rx/tx drop status '''

    agent = "netdev rx/tx status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.netdev',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    # Unlike most checks this collects details: minion -> netdev -> the
    # offending drop counters, so the failure report carries real figures.
    failed_minions = {}
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        dev_json = out[minion]['ret']
        for netdev in dev_json:
            if netdev in ignore_devices:
                continue
            for counter, limit in (('rx_drop', rx_drop_limit), ('tx_drop', tx_drop_limit)):
                drops = int(dev_json[netdev][counter])
                if drops > int(limit):
                    failed_minions.setdefault(minion, {}).setdefault(netdev, {})[counter] = drops

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True
673
674
def mem_check(target='*', target_type='glob', used_limit=80, ignore_dead=False, **kwargs):

    ''' Verify available memory status '''

    agent = "available memory status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.meminfo',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        meminfo = out[minion]['ret']
        mem_avail = int(meminfo['MemAvailable']['value'])
        mem_total = int(meminfo['MemTotal']['value'])
        # NOTE(review): on Python 2 this is integer division before float(),
        # so the percentage is truncated to a whole number.
        used_pct = float((mem_total - mem_avail) * 100 / mem_total)
        if used_pct > float(used_limit):
            if minion not in failed_minions:
                failed_minions.append(minion)
        else:
            verified_minions.append( { minion : str(used_pct) + '%' } )

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True
709
710
def ntp_status(params = ['-4', '-p', '-n']):

    ''' JSON formatted ntpq command output

    :param params: ntpq CLI flags (default: IPv4, peers list, numeric)
    :return: dict keyed by peer address with parsed per-peer fields
    '''

    # Tally-code indicators from the first column of the peers listing and
    # their meanings.
    ntp_states = [
      { 'indicator': '#', 'comment': 'source selected, distance exceeds maximum value' },
      { 'indicator': 'o', 'comment': 'source selected, Pulse Per Second (PPS) used' },
      { 'indicator': '+', 'comment': 'source selected, included in final set' },
      { 'indicator': 'x', 'comment': 'source false ticker' },
      { 'indicator': '.', 'comment': 'source selected from end of candidate list' },
      { 'indicator': '-', 'comment': 'source discarded by cluster algorithm' },
      { 'indicator': '*', 'comment': 'current time source' },
      { 'indicator': ' ', 'comment': 'source discarded high stratum, failed sanity' }
    ]
    ntp_state_indicators = []
    for state in ntp_states:
        ntp_state_indicators.append(state['indicator'])
    # 't' column values mapped to human readable source types.
    source_types = {}
    source_types['l'] = "local (such as a GPS, WWVB)"
    source_types['u'] = "unicast (most common)"
    source_types['m'] = "multicast"
    source_types['b'] = "broadcast"
    source_types['-'] = "netaddr"

    proc = subprocess.Popen(['ntpq'] + params, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    ntp_lines = stdout.split('\n')
    # First line is the column header; rename the terse field names.
    fields = re.sub("\s+", " ", ntp_lines[0]).split()
    fields[fields.index('st')] = 'stratum'
    fields[fields.index('t')] = 'source_type'

    ntp_peers = {}
    # Skip the two header/separator lines.
    for line in ntp_lines[2:]:
        if len(line.strip()) > 0:
            element = {}
            values = re.sub("\s+", " ", line).split()
            for i in range(len(values)):
                if fields[i] == 'source_type':
                    element[fields[i]] = { 'indicator': values[i], 'comment': source_types[values[i]] }
                elif fields[i] in ['stratum', 'when', 'poll', 'reach']:
                    # '-' placeholders are normalized to -1.
                    if values[i] == '-':
                        element[fields[i]] = int(-1)
                    else:
                        element[fields[i]] = int(values[i])
                elif fields[i] in ['delay', 'offset', 'jitter']:
                    element[fields[i]] = float(values[i])
                else:
                    element[fields[i]] = values[i]
            peer = element.pop('remote')
            # First character of the remote column is the tally code, if any.
            peer_state = peer[0]
            if peer_state in ntp_state_indicators:
                peer = peer[1:]
            else:
                # Unknown/absent indicator -> synthetic 'f' (failed) state.
                peer_state = 'f'
            element['current'] = False
            if peer_state == '*':
                element['current'] = True
            for state in ntp_states:
                if state['indicator'] == peer_state:
                    element['state'] = state.copy()
                if peer_state == 'f' and state['indicator'] == ' ':
                    # Reuse the 'discarded' description under the 'f' code.
                    fail_state = state.copy()
                    fail_state.pop('indicator')
                    fail_state['indicator'] = 'f'
                    element['state'] = fail_state
            ntp_peers[peer] = element

    return ntp_peers
780
781
def ntp_check(min_peers=1, max_stratum=3, target='*', target_type='glob', ignore_dead=False, **kwargs):

    ''' Verify NTP peers status '''

    agent = "ntpd peers status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.ntp_status',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        ntp_json = out[minion]['ret']
        # Peers at or below the allowed stratum count as usable sources.
        good_peers = [peer for peer in ntp_json if ntp_json[peer]['stratum'] < int(max_stratum) + 1]
        if len(good_peers) > int(min_peers) - 1:
            if minion not in verified_minions:
                verified_minions.append(minion)
        elif minion not in failed_minions:
            failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True
Dzmitry Stremkouskif1bcbb52019-04-11 15:48:24 +0200820
821
def gluster_pool_list():

    ''' JSON formatted GlusterFS pool list command output '''

    stdout = subprocess.Popen(['gluster', 'pool', 'list'], stdout=subprocess.PIPE).communicate()[0]

    rows = stdout.split('\n')
    row_re = re.compile('^(\S+)\s+(\S+)\s+(\S+)$')
    # First row carries the column names; they become the per-peer keys
    # (lowercased), with 'uuid' promoted to the top-level dict key.
    fields = row_re.findall(rows[0])[0]

    pool = {}
    for row in rows[1:]:
        if not row.strip():
            continue
        values = row_re.findall(row.strip())[0]
        peer = dict(zip([f.lower() for f in fields], values))
        pool[peer.pop('uuid')] = peer

    return pool
844
845
def gluster_volume_status():

    ''' JSON formatted GlusterFS volumes status command output

    Parses 'gluster volume status all detail' plain text into
    {volume_name: {'bricks': [{'host': ..., 'path': ..., <detail fields>}]}}.
    '''

    proc = subprocess.Popen(['gluster', 'volume', 'status', 'all', 'detail'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    # Line-oriented state machine over the CLI output.
    begin_volume = False   # currently inside a volume section
    brick_lookup = False   # next 'Brick' line starts a new brick record
    volumes = {}
    volume_name = ""

    for line in stdout.split('\n'):
        if 'Status of volume' in line:
            # Section header: start a new volume entry.
            volume_name = line.split(':')[1].strip()
            volumes[volume_name] = { 'bricks': [] }
            begin_volume = True
        elif len(line.strip()) == 0:
            # Blank line terminates the current volume section.
            if begin_volume:
                begin_volume = False
        elif '--------' in line:
            # Separator announces an upcoming brick block.
            brick_lookup = True
        elif brick_lookup and line.split(':')[0].strip() == 'Brick':
            # 'Brick : <host>:<path>' opens a new brick record.
            brick_host, brick_path = re.findall('^Brick\ *:\ (.*)', line)[0].split()[1].split(':')
            volumes[volume_name]['bricks'].append({ 'host': brick_host, 'path': brick_path })
            brick_lookup = False
        else:
            # Any other 'Key : Value' detail line is attached to the most
            # recently added brick.
            # NOTE(review): assumes exactly one ':' per detail line; a value
            # containing ':' would raise ValueError here — confirm.
            brick_key, brick_value = line.split(':')
            brick_key = brick_key.strip().lower().replace(' ', '_')
            brick_value = brick_value.strip()
            volumes[volume_name]['bricks'][len(volumes[volume_name]['bricks']) - 1][brick_key] = brick_value

    return volumes
879
880
def gluster_pool_check(target='I@glusterfs:server', target_type='compound', expected_size=3, ignore_dead=False, **kwargs):

    ''' Check GlusterFS peer status '''

    agent = "glusterfs peer status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.gluster_pool_list',
                                    timeout=3,
                                    kwargs='[batch=True]'
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        gluster_json = out[minion]['ret']
        connected = [peer for peer in gluster_json if gluster_json[peer]['state'] == 'Connected']
        # Fail when any peer is disconnected or the pool is undersized.
        if len(connected) != len(gluster_json) or len(connected) < expected_size:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True
921
922
def gluster_volumes_check(target='I@glusterfs:server', target_type='compound', expected_size=3, ignore_volumes=[], ignore_dead=False, **kwargs):

    ''' Check GlusterFS volumes status

    :param expected_size: minimum number of bricks (and online bricks)
                          every volume must have
    :param ignore_volumes: volume names excluded from verification
    '''

    agent = "glusterfs volumes status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.gluster_volume_status',
                                    timeout=3,
                                    kwargs='[batch=True]'
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    verified_volumes = []
    for minion in out:
        verified_minions.append(minion)
        gluster_json = out[minion]['ret']
        for volume in gluster_json:
            if volume in ignore_volumes:
                continue
            verified_volumes.append(volume)
            alive_bricks = 0
            if 'bricks' not in gluster_json[volume]:
                if minion not in failed_minions:
                    failed_minions.append(minion)
                # BUG FIX: previously fell through and raised KeyError on
                # the missing 'bricks' key; skip this volume instead.
                continue
            bricks = gluster_json[volume]['bricks']
            # Volume must have at least the expected number of bricks ...
            if len(bricks) < expected_size:
                if minion not in failed_minions:
                    failed_minions.append(minion)
            # ... and enough of them must be online.
            for brick in bricks:
                if brick['online'] == 'Y':
                    alive_bricks += 1
                else:
                    if minion not in failed_minions:
                        failed_minions.append(minion)
            if alive_bricks < expected_size:
                if minion not in failed_minions:
                    failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info("Verified minions:")
        logger.info(verified_minions)
        logger.info("Verified volumes:")
        logger.info(verified_volumes)

    return True
979
980
def ceph_cmd(cmd):

    ''' JSON formatted ceph command output '''

    # Ask ceph for JSON output directly so no text parsing is needed.
    ceph_args = ['ceph'] + cmd.split() + ['--format', 'json-pretty']
    stdout, stderr = subprocess.Popen(ceph_args, stdout=subprocess.PIPE).communicate()

    return json.loads(stdout)
989
990
def ceph_health_check(target='I@ceph:mon', target_type='compound', expected_status='HEALTH_OK', expected_state='active+clean', ignore_dead=False, **kwargs):

    ''' Check all ceph monitors health status '''

    agent = "ceph health status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.ceph_cmd',
                                    arg=['status'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []

    def _fail(minion):
        # Register each failing minion only once.
        if minion not in failed_minions:
            failed_minions.append(minion)

    for minion in out:
        verified_minions.append(minion)
        ceph_json = out[minion]['ret']
        fsid = ceph_json['fsid']

        osdmap = ceph_json['osdmap']['osdmap']

        # Overall cluster status must match the expectation.
        if ceph_json['health']['overall_status'] != expected_status:
            _fail(minion)

        # Any full or near-full condition is a failure.
        if osdmap['full']:
            _fail(minion)
        if osdmap['nearfull']:
            _fail(minion)

        # All OSDs must be present, "in" and "up".
        if not (osdmap['num_osds'] == osdmap['num_in_osds'] == osdmap['num_up_osds']):
            _fail(minion)

        # Quorum must cover every monitor in the monmap.
        quorum = len(ceph_json['quorum'])
        quorum_names = len(ceph_json['quorum_names'])
        mons = len(ceph_json['monmap']['mons'])
        if not (quorum == quorum_names == mons):
            _fail(minion)

        # Per-monitor timecheck health.
        for mon in ceph_json['health']['timechecks']['mons']:
            if mon['health'] != expected_status:
                _fail(minion)

        # Per-monitor health-service status.
        for srv in ceph_json['health']['health']['health_services']:
            for mon in srv['mons']:
                if mon['health'] != expected_status:
                    _fail(minion)

        # Every placement-group state must match the expected state.
        for state in ceph_json['pgmap']['pgs_by_state']:
            if state['state_name'] != expected_state:
                _fail(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info("Quorum:")
        logger.info(ceph_json['quorum_names'])
        logger.info("Verified minions:")
        logger.info(verified_minions)

    return True
1067
1068
def get_entropy():

    ''' Retrieve entropy size for the host '''

    # The kernel exposes the current entropy pool size (in bits) via procfs
    with open('/proc/sys/kernel/random/entropy_avail', 'r') as entropy_file:
        return entropy_file.read()
1076
1077
def entropy_check(target='*', target_type='glob', minimum_bits=700, ignore_dead=False, **kwargs):

    ''' Check entropy size in cluster.

        A minion fails when its available entropy pool is smaller than
        minimum_bits (low pools may block /dev/random consumers).
    '''

    agent = "entropy size status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.get_entropy',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []

    for minion in out:
        verified_minions.append(minion)
        entropy = int(out[minion]['ret'])
        if entropy < minimum_bits:
            # idiomatic 'not in' (original used 'not minion in')
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True
1111
1112
def docker_registry_list(host):

    ''' Retrieve and list docker catalog.

        Queries the registry v2 API and returns a {repository: [tags]}
        mapping; returns {} on any error (best effort).
    '''

    try:
        if host[0:4] == 'http':
            url = host + '/v2/'
        else:
            url = 'http://' + host + '/v2/'
        repos = requests.get(url + '_catalog')

        versions = {}
        for repo in repos.json()['repositories']:
            repo_versions = requests.get(url + repo + '/tags/list')
            versions[repo] = repo_versions.json().pop('tags')
        return versions
    # Narrowed from a bare 'except:' which also swallowed
    # SystemExit/KeyboardInterrupt; the best-effort {} fallback is kept.
    except Exception:
        return {}
Dzmitry Stremkouski7cd10fc2019-04-17 11:51:59 +02001131
1132
def docker_ps(list_all=0):

    ''' List docker containers via the local docker unix socket '''

    import docker

    docker_client = docker.client.Client(base_url='unix://var/run/docker.sock')
    containers = docker_client.containers(all=list_all)
    return containers
1138
Dzmitry Stremkouski2c709f22019-04-22 02:27:54 +02001139
def zookeeper_cmd(cmd, hostname='localhost', port=2181):

    ''' Execute a zookeeper four-letter command via socket and return the
        raw response text.
    '''

    buf_size = 1024
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((hostname, port))
        sock.sendall(cmd)
        # Half-close: tells zookeeper we are done sending the command
        sock.shutdown(socket.SHUT_WR)
        chunks = []
        while 1:
            data = sock.recv(buf_size)
            if data == "":
                break
            chunks.append(data)
    finally:
        # Always release the socket; the original leaked it when
        # connect/sendall raised
        sock.close()
    return "".join(chunks)
1157
1158
def zookeeper_stats():

    ''' Retrieve zookeeper stats.

        Aggregates the 'conf', 'envi', 'srvr' and 'cons' four-letter-word
        command outputs into one dict with the sections 'configuration',
        'environment', 'server' and 'clients'.
    '''

    stats = {}
    stats['configuration'] = {}
    for line in zookeeper_cmd('conf').split('\n'):
        if line:
            # 'conf' lines look like 'key=value'
            # NOTE(review): assumes exactly one '=' per line — a value
            # containing '=' would raise ValueError here; confirm against
            # real zookeeper output
            key, value = line.split('=')
            if value.strip().isdigit():
                value = int(value)
            else:
                value = value.strip()
            # Normalize keys: lowercase, spaces replaced by underscores
            stats['configuration'][key.strip().lower().replace(' ', '_')] = value

    stats['environment'] = {}
    # First line of 'envi' output is a header — skip it
    for line in zookeeper_cmd('envi').split('\n')[1:]:
        if line:
            key, value = line.split('=')
            if value.strip().isdigit():
                value = int(value)
            else:
                value = value.strip()
            stats['environment'][key.strip().lower().replace(' ', '_')] = value

    stats['server'] = {}
    for line in zookeeper_cmd('srvr').split('\n'):
        if line:
            # Version line, e.g. 'Zookeeper version: 3.4.x-<build>, ...'
            if re.findall('^Zookeeper version:', line, flags=re.IGNORECASE):
                version_str = line.split(':')[1].strip()
                version = version_str
                # Drop the build suffix after '-'; a dotted numeric version
                # becomes a list of ints, otherwise the raw string is kept
                if '-' in version_str:
                    version_str = version_str.split('-')[0]
                if '.' in version_str:
                    version = []
                    version_list = version_str.split('.')
                    for elem in version_list:
                        if elem.strip().isdigit():
                            version.append(int(elem))
                stats['server']['version'] = version
                continue
            # Latency line: 'Latency min/avg/max: a/b/c'
            if re.findall('^Latency min/avg/max:', line, flags=re.IGNORECASE):
                latency_min, latency_avg, latency_max = line.split(':')[1].strip().split('/')
                stats['server']['latency'] = {'min':int(latency_min),'max':int(latency_max),'avg':int(latency_avg)}
                continue
            # Remaining 'srvr' lines are plain 'key: value' pairs
            key, value = line.split(':')
            if value.strip().isdigit():
                value = int(value)
            else:
                value = value.strip()
            stats['server'][key.strip().lower().replace(' ', '_')] = value

    stats['clients'] = {}
    for line in zookeeper_cmd('cons').split('\n'):
        if line:
            # 'cons' line shape: ' /<addr>:<port>[<direction>](k=v,k=v,...)'
            # findall returns one 4-group tuple; [0][1:] drops the leading
            # ' /' group, leaving (addr, ':port[dir]', '(attrs)')
            clients = re.findall('^(\s*\/)(.+)(:\d+\[\d+\])(\(.+\))$', line)[0][1:]
            addr = clients[0]
            port, direction = re.findall('^(\d+)\[(\d+)\]$', clients[1][1:])[0]
            # Clients are keyed as '[addr]:port'
            client = '['+addr+']:'+str(port)
            stats['clients'][client] = {'direction': int(direction)}
            # Parse the parenthesized comma-separated 'k=v' attribute list
            for elem in clients[2][1:-1].split(','):
                key, value = elem.split('=')
                if value.strip().isdigit():
                    value = int(value)
                else:
                    value = value.strip()
                stats['clients'][client][key.strip().lower().replace(' ', '_')] = value

    return stats
1228
1229
def get_zookeeper_leader(target='I@opencontrail:control', target_type='compound', ignore_dead=False, **kwargs):

    ''' Retrieve zookeeper leader '''

    agent = "zookeeper leader retrieve"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.zookeeper_stats',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    # Last minion reporting 'leader' mode wins; None when no leader is found
    leader = None
    for candidate in out:
        if out[candidate]['ret']['server']['mode'] == 'leader':
            leader = candidate

    return leader
1253
1254
def contrail_vrouter_list(api_host='127.0.0.1', api_port=9100):

    ''' Retrieve and list contrail vrouters.
        Valid targets: Contrail controllers.

        Returns a list of {'name': ..., 'uuid': ...} dicts, or {} on any
        error.
        NOTE(review): the error fallback is a dict while success returns a
        list — confirm callers rely on this before unifying.
    '''

    try:
        if api_host[0:4] == 'http':
            url = api_host + ':' + str(api_port)
        else:
            url = 'http://' + api_host + ':' + str(api_port)

        vrouters = requests.get(url + '/virtual-routers').json()
        vrouter_list = []
        for vr in vrouters['virtual-routers']:
            vr_uuid = vr['uuid']
            # The non-default fq_name element is the vrouter's name
            for name in vr['fq_name']:
                if name == "default-global-system-config":
                    continue
                else:
                    vr_name = name
            vrouter_list.append({'name': vr_name, 'uuid': vr_uuid})
        return vrouter_list

    # Narrowed from a bare 'except:' which also swallowed
    # SystemExit/KeyboardInterrupt
    except Exception:
        return {}
1281
1282
def contrail_vrouter_show(vr_uuid, api_host='127.0.0.1', api_port=9100):

    ''' Retrieve contrail vrouter data for a given vrouter uuid.
        Valid targets: Contrail controllers.

        Returns the decoded JSON response, or {} on any error.
    '''

    try:
        if api_host[0:4] == 'http':
            url = api_host + ':' + str(api_port)
        else:
            url = 'http://' + api_host + ':' + str(api_port)

        return requests.get(url + '/virtual-router/' + vr_uuid).json()

    # Narrowed from a bare 'except:' which also swallowed
    # SystemExit/KeyboardInterrupt
    except Exception:
        return {}
1299
1300
1301def _xmletree_descend_child(given_child, tag_requested):
1302
1303 ''' Returns xmletree subelement by tag name '''
1304
1305 my_child = {}
1306
1307 for child in given_child:
1308 if child.tag == tag_requested:
1309 my_child = child
1310 break
1311
1312 return my_child
1313
1314
def contrail_vrouter_agent_status(api_host='127.0.0.1', api_port=8085):

    ''' Retrieve contrail vrouter agent status.

        Returns a dict with the agent state and its connection list, or an
        error description string on failure.
    '''

    import xml.etree.ElementTree as ET

    if api_host[0:4] == 'http':
        url = api_host + ':' + str(api_port)
    else:
        url = 'http://' + api_host + ':' + str(api_port)

    try:
        req = requests.get(url + '/Snh_SandeshUVECacheReq?x=NodeStatus')
        if int(req.status_code) != 200:
            return "Could not fetch data from vrouter agent via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text))
    except Exception as e:
        # The original did 'except: pass' here and then crashed with
        # NameError on the undefined 'req' below; report the connection
        # failure instead.
        return "Could not fetch data from vrouter agent via %s.\n%s" % (url, str(e))

    try:
        xmletree = ET.fromstring(req.text)
    except Exception:
        return "Could not parse xml tree %s" % str(req.text)

    try:
        vrouter_data = {}
        # Walk down to the ProcessStatus element of the NodeStatus UVE
        child = _xmletree_descend_child(xmletree, 'NodeStatusUVE')
        child = _xmletree_descend_child(child, 'data')
        child = _xmletree_descend_child(child, 'NodeStatus')
        child = _xmletree_descend_child(child, 'process_status')
        child = _xmletree_descend_child(child, 'list')
        child = _xmletree_descend_child(child, 'ProcessStatus')
        vrouter_data['state'] = _xmletree_descend_child(child, 'state').text
        vrouter_data['connections'] = []
        child = _xmletree_descend_child(child, 'connection_infos')
        # Collect every reported connection with its endpoints
        for elem in _xmletree_descend_child(child, 'list'):
            conn = {}
            conn['type'] = _xmletree_descend_child(elem,'type').text
            conn['name'] = _xmletree_descend_child(elem,'name').text
            conn['status'] = _xmletree_descend_child(elem,'status').text
            conn['description'] = _xmletree_descend_child(elem,'description').text
            conn['server_addrs'] = []
            server_addrs = _xmletree_descend_child(elem,'server_addrs')
            for srv in _xmletree_descend_child(server_addrs,'list'):
                host, port = srv.text.split(':')
                conn['server_addrs'].append({'host': host, 'port': port})
            vrouter_data['connections'].append(conn)
        return vrouter_data
    except Exception:
        return "Unsupported xml tree for this function %s" % str(req.text)
1364
1365
def contrail_collector_agent_status(vr_name, api_host='auto', api_port=9081):

    ''' Retrieve contrail vrouter agent status from analyticsdb '''

    # Build the analytics API base URL; 'auto' resolves the local
    # analytics address from pillar data
    if api_host[0:4] == 'http':
        base = api_host
    elif api_host == 'auto':
        base = 'http://' + __salt__['pillar.get']('_param:opencontrail_analytics_address')
    else:
        base = 'http://' + api_host
    url = base + ':' + str(api_port)

    req = requests.get(url + '/analytics/uves/vrouter/' + vr_name + '?flat')
    if int(req.status_code) != 200:
        return "Could not fetch data from vrouter agent via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text))

    return json.loads(req.text)
1383
1384
def contrail_control_peers_summary(api_host='auto', api_port=8083):

    ''' Retrieve contrail control peers summary.

        Queries the control node introspect API and returns a list of
        per-peer attribute dicts, or an error description string on failure.
    '''

    import xml.etree.ElementTree as ET

    if api_host[0:4] == 'http':
        url = api_host + ':' + str(api_port)
    elif api_host == 'auto':
        my_ip = '127.0.0.1'
        url = 'http://' + my_ip+ ':' + str(api_port)
    else:
        url = 'http://' + api_host + ':' + str(api_port)

    req = requests.get(url + '/Snh_ShowBgpNeighborSummaryReq')
    if int(req.status_code) != 200:
        return "Could not fetch data from contrail control via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text))

    peers = []
    summary = req.text

    try:
        xmletree = ET.fromstring(summary)
        # Each child of the first <list> element describes one peer;
        # flatten its subelements into a tag -> text dict
        for elem in xmletree.find('.//list'):
            attrs = {}
            for child in elem:
                attrs[child.tag] = child.text
            peers.append(attrs)
    # Narrowed from a bare 'except:' which also swallowed
    # SystemExit/KeyboardInterrupt
    except Exception:
        return "Could not parse xml tree %s" % str(summary)

    return peers
1417
1418
Dzmitry Stremkouski36290202019-05-05 21:26:25 +02001419def _get_object(json_obj, obj_path):
1420
1421 ''' Retrieve subelemet of an JSON object or value '''
1422
1423 if ':' in obj_path:
1424 splitter = obj_path.split(':')
1425 k = splitter[0]
1426 v = ':'.join(splitter[1:])
1427 if k.isdigit():
1428 # Return specific element path
1429 return [ _get_object(json_obj[int(k)], v) ]
1430 elif k == '*':
1431 l = []
1432 for el in json_obj:
1433 l.append(_get_object(el, v))
1434 # Return all list elements from the path
1435 return l
1436 else:
1437 # Contrail output may have nested JSON
1438 if isinstance(json_obj, str) or isinstance(json_obj, unicode):
1439 json_obj = json.loads(json_obj)
1440 # Assume list. Return it
1441 return { k: _get_object(json_obj[k], v) }
1442 else:
1443 return { obj_path: json_obj[obj_path] }
1444
1445
1446def _deepmerge(o1, o2):
1447
1448 ''' Deep merge JSON objects '''
1449
1450 o3 = {}
1451 if type(o1) == type(o2):
1452 if type(o1) == dict or type(o1) == tuple:
1453 for k in set(o1.keys() + o2.keys()):
1454 if k in o1:
1455 if k in o2:
1456 o3[k] = _deepmerge(o1[k], o2[k])
1457 else:
1458 o3[k] = o1[k]
1459 else:
1460 o3[k] = o2[k]
1461 elif type(o1) == list or type(o1) == set:
1462 o3 = [] + o2
1463 for el in o3:
1464 i = o3.index(el)
1465 o3[i] = _deepmerge(o1[i], o2[i])
1466 else:
1467 o3 = o2
1468 else:
1469 o3 = o2
1470
1471 return o3
1472
1473
def contrail_vrouter_agent_info(vr_name, filter_map=default_vrouter_info_map):

    ''' Retrieve filtered contrail vrouter agent info from analyticsdb '''

    # Full UVE report for the vrouter; filter_map picks the paths to keep
    agent_status = contrail_collector_agent_status(vr_name)

    info = {}
    for section in filter_map:
        info[section] = {}
        for path in filter_map[section]:
            # Merge each extracted sub-path into the accumulated result
            info = _deepmerge(info, {section: _get_object(agent_status[section], path)})

    return info
1486
1487
def kafka_brokers_ids():

    ''' Retrieve kafka brokers ids '''

    # Broker registrations appear in the zookeeper 'dump' output as
    # '/brokers/ids/<id>' paths
    broker_ids = []
    for entry in zookeeper_cmd('dump').split('\n'):
        if entry and '/brokers/ids/' in entry:
            broker_ids.append(int(entry.split('/')[3]))

    return broker_ids
1499
1500
def libvirt_capabilities():

    ''' JSON formatted libvirt capabilities list.

        Returns {arch: [machine types]} for every guest architecture
        reported by 'virsh capabilities', or an error string on failure.
    '''

    import xml.etree.ElementTree as ET

    # Pre-initialize so the error message below cannot hit an undefined
    # name: when Popen itself failed (e.g. virsh binary missing), the
    # original raised NameError on 'stdout' instead of returning a message.
    stdout = ''
    try:
        proc = subprocess.Popen(['virsh', 'capabilities'], stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        xmletree = ET.fromstring(stdout)
    except Exception:
        return "Could not parse xml tree %s" % str(stdout)

    try:
        capabilities = {}
        # Collect guest machine types per architecture, skipping entries
        # marked as canonical aliases
        for elem in xmletree:
            if elem.tag == "guest":
                for el in elem:
                    if el.tag == 'arch':
                        _name = el.attrib['name']
                        capabilities[_name] = []
                        for arch in el:
                            if arch.tag == 'machine':
                                if 'canonical' not in arch.attrib:
                                    capabilities[_name].append(arch.text)

        return capabilities
    except Exception:
        return "Unsupported xml tree for this function %s" % str(stdout)
1530
Dzmitry Stremkouskia78a04d2019-07-13 11:05:03 +02001531
def contrail_control_peer_status(api_host='auto', api_port=8083, fields=default_peer_filter):

    ''' Contrail control peer status.

        Returns {peer_name: {attr: value}} keeping only the attributes
        listed in 'fields'.
    '''

    peer_status = {}

    # Forward the connection parameters — the original ignored api_host and
    # api_port and always queried the defaults of contrail_control_peers_summary
    for peer_elem in contrail_control_peers_summary(api_host, api_port):
        elem = {}
        for attr in peer_elem:
            if attr in fields:
                elem[attr] = peer_elem[attr]

        peer_name = peer_elem["peer"]
        peer_status[peer_name] = elem

    return peer_status
1547 return peer_status