import requests
import subprocess
import socket
import salt.utils
import logging
import os
import re
import json
import yaml

__author__ = "Dzmitry Stremkouski"
__copyright__ = "Copyright 2019, Mirantis Inc."
__license__ = "Apache 2.0"

logger = logging.getLogger(__name__)
stream = logging.StreamHandler()
logger.addHandler(stream)

try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper

default_vrouter_info_map = yaml.load("""
ContrailConfig:
- deleted
- elements:uuid
- elements:virtual_router_dpdk_enabled
- elements:virtual_router_type
VrouterAgent:
- build_info:build-info:0:build-version
- build_info:build-info:0:build-number
- collector_server_list_cfg
- config_file
- control_ip
- control_node_list_cfg
- dns_server_list_cfg
- dns_servers
- down_interface_count
- eth_name
- headless_mode_cfg
- hostname_cfg
- hypervisor
- mode
- phy_if
- platform
- self_ip_list
- total_interface_count
- tunnel_type
- vhost_cfg
- vhost_if
- vr_limits:max_interfaces
- vr_limits:max_labels
- vr_limits:max_mirror_entries
- vr_limits:max_nexthops
- vr_limits:max_vrfs
- vr_limits:vrouter_max_bridge_entries
- vr_limits:vrouter_max_flow_entries
- vr_limits:vrouter_max_oflow_bridge_entries
- vr_limits:vrouter_max_oflow_entries
- xmpp_peer_list:*:ip
- xmpp_peer_list:*:primary
- xmpp_peer_list:*:status
""", Loader=Loader)

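# Note on the filter map above: each entry is a colon-separated path into the
# JSON documents returned by the Contrail analytics API.  A numeric component
# selects a list index and '*' expands over all list elements; the paths are
# resolved by _get_object() further down in this module.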


def _failed_minions(out, agent, failed_minions):

    ''' Verify failed minions '''

    if len(failed_minions) > 0:
        logger.error("%s check FAILED" % agent)
        logger.error("Some minions returned non-zero exit code or empty data")
        logger.error("Failed minions:" + str(failed_minions))
        for minion in failed_minions:
            logger.error(minion)
            logger.debug(str(out[minion]['ret']))
        __context__['retcode'] = 2
        return False

    return True


def _minions_output(out, agent, ignore_dead, ignore_empty=False):

    ''' Verify minions output and exit code '''

    if not out:
        logger.error("%s check FAILED" % agent)
        logger.error("No response from master cmd")
        __context__['retcode'] = 2
        return False

    if not ignore_dead:
        jid = out.itervalues().next()['jid']
        job_stats = __salt__['saltutil.runner']( 'jobs.print_job', arg=[jid] ) or None
        if not job_stats:
            logger.error("%s check FAILED" % agent)
            logger.error("No response from master runner")
            __context__['retcode'] = 2
            return False

        job_result = job_stats[jid]['Result']
        job_minions = job_stats[jid]['Minions']
        if len(job_minions) != len(job_result):
            logger.error("%s check FAILED" % agent)
            logger.error("Some minions are offline")
            logger.error(list(set(job_minions) - set(job_result.keys())))
            __context__['retcode'] = 2
            return False

    failed_minions = []
    for minion in out:
        if 'retcode' in out[minion]:
            if out[minion]['retcode'] == 0:
                if not ignore_empty:
                    if isinstance(out[minion]['ret'], bool):
                        if minion not in failed_minions:
                            failed_minions.append(minion)
                    elif len(out[minion]['ret']) == 0:
                        if minion not in failed_minions:
                            failed_minions.append(minion)
            else:
                if minion not in failed_minions:
                    failed_minions.append(minion)
        else:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    return True

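# Note: the helpers above operate on the dictionary returned by
# __salt__['saltutil.cmd'](...), which is keyed by minion id and carries
# 'retcode', 'ret' and 'jid' per minion; _minions_output() additionally asks
# the master runner for the job record to spot minions that never answered.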

def minions_check(wait_timeout=1, gather_job_wait_timeout=1, target='*', target_type='glob', ignore_dead=False):

    ''' Verify minions are online '''

    agent = "Minions"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='test.ping',
                                    timeout=wait_timeout,
                                    gather_job_timeout=gather_job_wait_timeout
                                  ) or None

    return _minions_output(out, agent, ignore_dead, ignore_empty=True)

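# Usage sketch (run from the Salt master, assuming this module is synced there
# and to the minions under the name "health_checks", as the calls below imply):
#   salt-call health_checks.minions_check target='cmp*' ignore_dead=True
#   salt-call health_checks.time_diff_check time_diff=2 debug=True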

def time_diff_check(time_diff=1, target='*', target_type='glob', ignore_dead=False, **kwargs):

    ''' Verify time diff on servers '''

    agent = "Time diff"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.time',
                                    arg=['%s'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    minions_times = {}
    env_times = []
    verified_minions = []

    for minion in out:
        verified_minions.append(minion)
        if out[minion]['retcode'] == 0:
            minion_time = int(out[minion]['ret'])
            if str(minion_time) not in minions_times:
                minions_times[str(minion_time)] = []
            minions_times[str(minion_time)].append(minion)
            env_times.append(minion_time)

    env_times.sort()
    diff = env_times[-1] - env_times[0]

    if diff > time_diff:
        __context__['retcode'] = 2
        if kwargs.get("debug", False):
            return False, minions_times
        else:
            return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def contrail_check(target='I@opencontrail:control or I@opencontrail:collector or I@opencontrail:compute', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify contrail status returns nothing critical '''

    agent = "Contrail status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='cmd.run',
                                    arg=['contrail-status'],
                                    timeout=5
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    pattern = '^(==|$|\S+\s+(active|backup|inactive\s\(disabled\son\sboot\)))'
    prog = re.compile(pattern)

    validated = []
    for minion in out:
        for line in out[minion]['ret'].split('\n'):
            if not prog.match(line) and minion not in failed_minions:
                failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(validated)
    return True


def galera_check(cluster_size=3, target='I@galera:master or I@galera:slave', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify galera cluster size and state '''

    agent = "Galera status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='mysql.status',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []

    validated = []
    for minion in out:
        if int(out[minion]['ret']['wsrep_cluster_size']) != int(cluster_size) and minion not in failed_minions:
            failed_minions.append(minion)
        if out[minion]['ret']['wsrep_evs_state'] != 'OPERATIONAL' and minion not in failed_minions:
            failed_minions.append(minion)
        validated.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(validated)
        logger.info("Cluster size: " + str(out[validated[0]]['ret']['wsrep_cluster_size']))
        logger.info("Cluster state: " + str(out[validated[0]]['ret']['wsrep_evs_state']))
    return True


def _quote_str(s, l=False, r=False):

    ''' Quoting rabbitmq erl objects for json import '''

    if len(s) > 0:
        if l:
            s = s.lstrip()
        if r:
            s = s.rstrip()
        if (s[0] == "'") and (s[-1] != "'") and r and not l:
            s += "'"
        if (s[0] == '"') and (s[-1] != '"') and r and not l:
            s += '"'
        if (s[-1] == "'") and (s[0] != "'") and l and not r:
            s = "'" + s
        if (s[-1] == '"') and (s[0] != '"') and l and not r:
            s = '"' + s
        if (s[-1] != "'") and (s[-1] != '"') and (s[0] != "'") and (s[0] != '"'):
            s = '"' + s.replace('"', '\\\"') + '"'
        else:
            if (not l) and (not r) and s[0] != '"' and not s[-1] != '"':
                s = s.replace('"', '\\\"')
        return s.replace("'", '"')
    else:
        return s


def _sanitize_rmqctl_output(string):

    ''' Sanitizing rabbitmq erl objects for json import '''

    rabbitctl_json = ""
    for line in string.split(','):
        copy = line
        left = ""
        right = ""
        mid = copy
        lpar = False
        rpar = False
        if re.search('([\[\{\s]+)(.*)', copy):
            mid = re.sub('^([\[\{\s]+)','', copy)
            left = copy[:-len(mid)]
            copy = mid
            lpar = True
        if re.search('(.*)([\]\}\s]+)$', copy):
            mid = re.sub('([\]\}\s]+)$','', copy)
            right = copy[len(mid):]
            copy = mid
            rpar = True
        result = left + _quote_str(mid, l=lpar, r=rpar) + right
        if (not rpar) and lpar and (len(left.strip()) > 0) and (left.strip()[-1] == '{'):
            result += ":"
        else:
            result += ","
        rabbitctl_json += result

    rabbitctl_json = rabbitctl_json[:-1]
    new_rabbitctl_json = rabbitctl_json
    for s in re.findall('"[^:\[{\]}]+"\s*:\s*("[^\[{\]}]+")', rabbitctl_json):
        if '"' in s[1:][:-1]:
            orig = s
            changed = '"' + s.replace('\\', '\\\\').replace('"', '\\\"') + '"'
            new_rabbitctl_json = new_rabbitctl_json.replace(orig, changed)
    return new_rabbitctl_json

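# Illustration (hypothetical fragment, not taken from a live node): a piece of
# erl-term output such as
#     {running_nodes,['rabbit@ctl01']}
# is rewritten by _sanitize_rmqctl_output() into
#     {"running_nodes":["rabbit@ctl01"]}
# which json.loads() accepts.  rabbitmq_cmd() below relies on this to turn
# "rabbitmqctl cluster_status" output into a Python structure.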

def rabbitmq_list_queues(vhost='/'):

    ''' JSON formatted RabbitMQ queues list '''

    proc = subprocess.Popen(['rabbitmqctl', 'list_queues', '-p', vhost], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    queues = {}
    for line in stdout.split('\n'):
        if re.findall('[0-9]$', line):
            queue_name, num = re.sub(r"\s+", " ", line).split()
            queues[queue_name] = int(num)

    return queues


def rabbitmq_list_vhosts():

    ''' JSON formatted RabbitMQ vhosts list '''

    proc = subprocess.Popen(['rabbitmqctl', 'list_vhosts'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    vhosts = []
    for line in stdout.split('\n'):
        if re.findall('^/', line):
            vhosts.append(line)

    return vhosts


def rabbitmq_cmd(cmd):

    ''' JSON formatted RabbitMQ command output '''

    supported_commands = ['status', 'cluster_status', 'list_hashes', 'list_ciphers']
    if cmd not in supported_commands:
        logger.error("Command is not supported yet, sorry")
        logger.error("Supported commands are: " + str(supported_commands))
        __context__['retcode'] = 2
        return False

    proc = subprocess.Popen(['rabbitmqctl', cmd], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    rabbitmqctl_cutoff = stdout[int(stdout.find('[')):int(stdout.rfind(']'))+1].replace('\n','')
    return json.loads(_sanitize_rmqctl_output(rabbitmqctl_cutoff))


def rabbitmq_check(target='I@rabbitmq:server', target_type='compound', ignore_dead=False, **kwargs):

    ''' Verify rabbit cluster and its alarms '''

    agent = "RabbitMQ status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.rabbitmq_cmd',
                                    arg=['cluster_status'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []

    for minion in out:
        rabbitmqctl_json = out[minion]['ret']
        running_nodes = []
        available_nodes = []
        alarms = []
        for el in rabbitmqctl_json:
            if 'alarms' in el:
                alarms = el['alarms']
            if 'nodes' in el:
                available_nodes = el['nodes'][0]['disc']
            if 'running_nodes' in el:
                running_nodes = el['running_nodes']

        if sorted(running_nodes) == sorted(available_nodes):
            nodes_alarms = []
            for node in running_nodes:
                for el in alarms:
                    if node in el:
                        if len(el[node]) > 0:
                            nodes_alarms.append(el[node])
            if len(nodes_alarms) > 0:
                failed_minions.append(minion)
        else:
            failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(running_nodes)
    return True


def haproxy_status(socket_path='/run/haproxy/admin.sock', buff_size = 8192, encoding = 'UTF-8', stats_filter=[]):

    ''' JSON formatted haproxy status '''

    stat_cmd = 'show stat\n'

    if not os.path.exists(socket_path):
        logger.error('Socket %s does not exist or haproxy not running' % socket_path)
        __context__['retcode'] = 2
        return False

    client = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(socket_path)

    client.send(bytearray(stat_cmd, encoding))
    output = client.recv(buff_size)

    res = ""
    while output:
        res += output.decode(encoding)
        output = client.recv(buff_size)
    client.close()

    haproxy_stats = {}
    res_list = res.split('\n')
    fields = res_list[0][2:].split(',')
    stats_list = []
    for line in res_list[1:]:
        if len(line.strip()) > 0:
            stats_list.append(line)

    for i in range(len(stats_list)):
        element = {}
        for n in fields:
            element[n] = stats_list[i].split(',')[fields.index(n)]
        server_name = element.pop('pxname')
        server_type = element.pop('svname')
        if stats_filter:
            filtered_element = element.copy()
            for el in element:
                if el not in stats_filter:
                    filtered_element.pop(el)
            element = filtered_element
        if server_name not in haproxy_stats:
            haproxy_stats[server_name] = {}
        if server_type == "FRONTEND" or server_type == "BACKEND":
            haproxy_stats[server_name][server_type] = element
        else:
            if 'UPSTREAM' not in haproxy_stats[server_name]:
                haproxy_stats[server_name]['UPSTREAM'] = {}
            haproxy_stats[server_name]['UPSTREAM'][server_type] = element

    return haproxy_stats

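# Usage sketch (assumes the module is synced to the minions as
# "health_checks", which is how haproxy_check() below invokes it):
#   salt 'prx*' health_checks.haproxy_status stats_filter="['status']"
# The result is keyed by proxy name and holds the FRONTEND/BACKEND rows plus
# an UPSTREAM dict with one entry per backend server.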

def haproxy_check(target='I@haproxy:proxy', target_type='compound', ignore_dead=False, ignore_services=[], ignore_upstreams=[], ignore_no_upstream=False, **kwargs):

    ''' Verify haproxy backends status '''

    agent = "haproxy status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.haproxy_status',
                                    arg=["stats_filter=['status']"],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        haproxy_json = out[minion]['ret']
        for service in haproxy_json:
            if service not in ignore_services:
                if haproxy_json[service]['FRONTEND']['status'] != 'OPEN':
                    if minion not in failed_minions:
                        failed_minions.append(minion)
                if haproxy_json[service]['BACKEND']['status'] != 'UP':
                    if minion not in failed_minions:
                        failed_minions.append(minion)
                if 'UPSTREAM' in haproxy_json[service]:
                    for upstream in haproxy_json[service]['UPSTREAM']:
                        if upstream not in ignore_upstreams:
                            if haproxy_json[service]['UPSTREAM'][upstream]['status'] != 'UP':
                                if minion not in failed_minions:
                                    failed_minions.append(minion)
                else:
                    if not ignore_no_upstream:
                        if minion not in failed_minions:
                            failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def df_check(target='*', target_type='glob', verify='space', space_limit=80, inode_limit=80, ignore_dead=False, ignore_partitions=[], **kwargs):

    ''' Verify storage space/inodes status '''

    supported_options = ['space', 'inodes']
    if verify not in supported_options:
        logger.error('Unsupported "verify" option.')
        logger.error('Supported options are: %s' % str(supported_options))
        __context__['retcode'] = 2
        return False

    if verify == 'space':
        fun_cmd = 'disk.usage'
        json_arg = 'capacity'
        limit = space_limit
    elif verify == 'inodes':
        fun_cmd = 'disk.inodeusage'
        json_arg = 'use'
        limit = inode_limit

    agent = "df status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun=fun_cmd,
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        df_json = out[minion]['ret']
        for disk in df_json:
            if disk not in ignore_partitions:
                if int(df_json[disk][json_arg][:-1]) > int(limit):
                    if minion not in failed_minions:
                        failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def load_check(target='*', target_type='glob', la1=3, la5=3, la15=3, ignore_dead=False, **kwargs):

    ''' Verify load average status '''

    agent = "load average status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.loadavg',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        la_json = out[minion]['ret']
        if float(la_json['1-min']) > float(la1):
            if minion not in failed_minions:
                failed_minions.append(minion)
        if float(la_json['5-min']) > float(la5):
            if minion not in failed_minions:
                failed_minions.append(minion)
        if float(la_json['15-min']) > float(la15):
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def netdev_check(target='*', target_type='glob', rx_drop_limit=0, tx_drop_limit=0, ignore_devices=[], ignore_dead=False, **kwargs):

    ''' Verify netdev rx/tx drop status '''

    agent = "netdev rx/tx status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.netdev',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = {}
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        dev_json = out[minion]['ret']
        for netdev in dev_json:
            if netdev not in ignore_devices:
                if int(dev_json[netdev]['rx_drop']) > int(rx_drop_limit):
                    if minion not in failed_minions:
                        failed_minions[minion] = {}
                    if netdev not in failed_minions[minion]:
                        failed_minions[minion][netdev] = {}
                    failed_minions[minion][netdev]['rx_drop'] = int(dev_json[netdev]['rx_drop'])
                if int(dev_json[netdev]['tx_drop']) > int(tx_drop_limit):
                    if minion not in failed_minions:
                        failed_minions[minion] = {}
                    if netdev not in failed_minions[minion]:
                        failed_minions[minion][netdev] = {}
                    failed_minions[minion][netdev]['tx_drop'] = int(dev_json[netdev]['tx_drop'])

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def mem_check(target='*', target_type='glob', used_limit=80, ignore_dead=False, **kwargs):

    ''' Verify available memory status '''

    agent = "available memory status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='status.meminfo',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        mem_avail = int(out[minion]['ret']['MemAvailable']['value'])
        mem_total = int(out[minion]['ret']['MemTotal']['value'])
        used_pct = float((mem_total - mem_avail) * 100 / mem_total)
        if used_pct > float(used_limit):
            if minion not in failed_minions:
                failed_minions.append(minion)
        else:
            verified_minions.append( { minion : str(used_pct) + '%' } )

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)
    return True


def ntp_status(params = ['-4', '-p', '-n']):

    ''' JSON formatted ntpq command output '''

    ntp_states = [
        { 'indicator': '#', 'comment': 'source selected, distance exceeds maximum value' },
        { 'indicator': 'o', 'comment': 'source selected, Pulse Per Second (PPS) used' },
        { 'indicator': '+', 'comment': 'source selected, included in final set' },
        { 'indicator': 'x', 'comment': 'source false ticker' },
        { 'indicator': '.', 'comment': 'source selected from end of candidate list' },
        { 'indicator': '-', 'comment': 'source discarded by cluster algorithm' },
        { 'indicator': '*', 'comment': 'current time source' },
        { 'indicator': ' ', 'comment': 'source discarded high stratum, failed sanity' }
    ]
    ntp_state_indicators = []
    for state in ntp_states:
        ntp_state_indicators.append(state['indicator'])
    source_types = {}
    source_types['l'] = "local (such as a GPS, WWVB)"
    source_types['u'] = "unicast (most common)"
    source_types['m'] = "multicast"
    source_types['b'] = "broadcast"
    source_types['-'] = "netaddr"

    proc = subprocess.Popen(['ntpq'] + params, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    ntp_lines = stdout.split('\n')
    fields = re.sub("\s+", " ", ntp_lines[0]).split()
    fields[fields.index('st')] = 'stratum'
    fields[fields.index('t')] = 'source_type'

    ntp_peers = {}
    for line in ntp_lines[2:]:
        if len(line.strip()) > 0:
            element = {}
            values = re.sub("\s+", " ", line).split()
            for i in range(len(values)):
                if fields[i] == 'source_type':
                    element[fields[i]] = { 'indicator': values[i], 'comment': source_types[values[i]] }
                elif fields[i] in ['stratum', 'when', 'poll', 'reach']:
                    if values[i] == '-':
                        element[fields[i]] = int(-1)
                    else:
                        element[fields[i]] = int(values[i])
                elif fields[i] in ['delay', 'offset', 'jitter']:
                    element[fields[i]] = float(values[i])
                else:
                    element[fields[i]] = values[i]
            peer = element.pop('remote')
            peer_state = peer[0]
            if peer_state in ntp_state_indicators:
                peer = peer[1:]
            else:
                peer_state = 'f'
            element['current'] = False
            if peer_state == '*':
                element['current'] = True
            for state in ntp_states:
                if state['indicator'] == peer_state:
                    element['state'] = state.copy()
                if peer_state == 'f' and state['indicator'] == ' ':
                    fail_state = state.copy()
                    fail_state.pop('indicator')
                    fail_state['indicator'] = 'f'
                    element['state'] = fail_state
            ntp_peers[peer] = element

    return ntp_peers

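# For reference: ntp_status() returns one entry per peer, keyed by the peer
# address, with the parsed ntpq columns (source_type, stratum, when, poll,
# reach, delay, offset, jitter), a 'state' record describing the tally code
# and a 'current' flag for the currently selected time source.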

def ntp_check(min_peers=1, max_stratum=3, target='*', target_type='glob', ignore_dead=False, **kwargs):

    ''' Verify NTP peers status '''

    agent = "ntpd peers status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.ntp_status',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        ntp_json = out[minion]['ret']
        good_peers = []
        for peer in ntp_json:
            if ntp_json[peer]['stratum'] < int(max_stratum) + 1:
                good_peers.append(peer)
        if len(good_peers) > int(min_peers) - 1:
            if minion not in verified_minions:
                verified_minions.append(minion)
        else:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True


def gluster_pool_list():

    ''' JSON formatted GlusterFS pool list command output '''

    proc = subprocess.Popen(['gluster', 'pool', 'list'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    regex = re.compile('^(\S+)\s+(\S+)\s+(\S+)$')
    fields = regex.findall(stdout.split('\n')[0])[0]

    pool = {}

    for line in stdout.split('\n')[1:]:
        if len(line.strip()) > 0:
            peer = {}
            values = regex.findall(line.strip())[0]
            for i in range(len(fields)):
                peer[fields[i].lower()] = values[i]
            uuid = peer.pop('uuid')
            pool[uuid] = peer

    return pool


def gluster_volume_status():

    ''' JSON formatted GlusterFS volumes status command output '''

    proc = subprocess.Popen(['gluster', 'volume', 'status', 'all', 'detail'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    begin_volume = False
    brick_lookup = False
    volumes = {}
    volume_name = ""

    for line in stdout.split('\n'):
        if 'Status of volume' in line:
            volume_name = line.split(':')[1].strip()
            volumes[volume_name] = { 'bricks': [] }
            begin_volume = True
        elif len(line.strip()) == 0:
            if begin_volume:
                begin_volume = False
        elif '--------' in line:
            brick_lookup = True
        elif brick_lookup and line.split(':')[0].strip() == 'Brick':
            brick_host, brick_path = re.findall('^Brick\ *:\ (.*)', line)[0].split()[1].split(':')
            volumes[volume_name]['bricks'].append({ 'host': brick_host, 'path': brick_path })
            brick_lookup = False
        else:
            brick_key, brick_value = line.split(':')
            brick_key = brick_key.strip().lower().replace(' ', '_')
            brick_value = brick_value.strip()
            volumes[volume_name]['bricks'][len(volumes[volume_name]['bricks']) - 1][brick_key] = brick_value

    return volumes

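# For reference: gluster_volume_status() returns a dict keyed by volume name,
# each carrying a 'bricks' list whose entries hold the brick 'host' and 'path'
# plus the lower-cased detail fields (e.g. 'online'), which is what
# gluster_volumes_check() below inspects.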

def gluster_pool_check(target='I@glusterfs:server', target_type='compound', expected_size=3, ignore_dead=False, **kwargs):

    ''' Check GlusterFS peer status '''

    agent = "glusterfs peer status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.gluster_pool_list',
                                    timeout=3,
                                    kwargs='[batch=True]'
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        gluster_json = out[minion]['ret']
        alive_peers = []
        for peer in gluster_json:
            if gluster_json[peer]['state'] == 'Connected':
                alive_peers.append(peer)
            else:
                if minion not in failed_minions:
                    failed_minions.append(minion)
        if len(alive_peers) < expected_size:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True


def gluster_volumes_check(target='I@glusterfs:server', target_type='compound', expected_size=3, ignore_volumes=[], ignore_dead=False, **kwargs):

    ''' Check GlusterFS volumes status '''

    agent = "glusterfs volumes status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.gluster_volume_status',
                                    timeout=3,
                                    kwargs='[batch=True]'
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    verified_volumes = []
    for minion in out:
        verified_minions.append(minion)
        gluster_json = out[minion]['ret']
        for volume in gluster_json:
            if volume in ignore_volumes:
                continue
            else:
                verified_volumes.append(volume)
            alive_bricks = 0
            if 'bricks' not in gluster_json[volume]:
                if minion not in failed_minions:
                    failed_minions.append(minion)
                continue
            bricks = gluster_json[volume]['bricks']
            if len(bricks) < expected_size:
                if minion not in failed_minions:
                    failed_minions.append(minion)
            for brick in bricks:
                if brick['online'] == 'Y':
                    alive_bricks += 1
                else:
                    if minion not in failed_minions:
                        failed_minions.append(minion)
            if alive_bricks < expected_size:
                if minion not in failed_minions:
                    failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info("Verified minions:")
        logger.info(verified_minions)
        logger.info("Verified volumes:")
        logger.info(verified_volumes)

    return True


def ceph_cmd(cmd):

    ''' JSON formatted ceph command output '''

    proc = subprocess.Popen(['ceph'] + cmd.split() + ['--format', 'json-pretty'], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    return json.loads(stdout)


def ceph_health_check(target='I@ceph:mon', target_type='compound', expected_status='HEALTH_OK', expected_state='active+clean', ignore_dead=False, **kwargs):

    ''' Check all ceph monitors health status '''

    agent = "ceph health status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.ceph_cmd',
                                    arg=['status'],
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []
    for minion in out:
        verified_minions.append(minion)
        ceph_json = out[minion]['ret']
        fsid = ceph_json['fsid']

        if ceph_json['health']['overall_status'] != expected_status:
            if minion not in failed_minions:
                failed_minions.append(minion)

        if ceph_json['osdmap']['osdmap']['full']:
            if minion not in failed_minions:
                failed_minions.append(minion)

        if ceph_json['osdmap']['osdmap']['nearfull']:
            if minion not in failed_minions:
                failed_minions.append(minion)

        num_osds = ceph_json['osdmap']['osdmap']['num_osds']
        num_in_osds = ceph_json['osdmap']['osdmap']['num_in_osds']
        num_up_osds = ceph_json['osdmap']['osdmap']['num_up_osds']
        if not ( num_osds == num_in_osds == num_up_osds ):
            if minion not in failed_minions:
                failed_minions.append(minion)

        quorum = len(ceph_json['quorum'])
        quorum_names = len(ceph_json['quorum_names'])
        mons = len(ceph_json['monmap']['mons'])
        if not ( quorum == quorum_names == mons ):
            if minion not in failed_minions:
                failed_minions.append(minion)

        for mon in ceph_json['health']['timechecks']['mons']:
            if mon['health'] != expected_status:
                if minion not in failed_minions:
                    failed_minions.append(minion)

        for srv in ceph_json['health']['health']['health_services']:
            for mon in srv['mons']:
                if mon['health'] != expected_status:
                    if minion not in failed_minions:
                        failed_minions.append(minion)

        for state in ceph_json['pgmap']['pgs_by_state']:
            if state['state_name'] != expected_state:
                if minion not in failed_minions:
                    failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info("Quorum:")
        logger.info(ceph_json['quorum_names'])
        logger.info("Verified minions:")
        logger.info(verified_minions)

    return True


def get_entropy():

    ''' Retrieve entropy size for the host '''

    with open('/proc/sys/kernel/random/entropy_avail', 'r') as f:
        entropy = f.read()
    return entropy


def entropy_check(target='*', target_type='glob', minimum_bits=700, ignore_dead=False, **kwargs):

    ''' Check entropy size in cluster '''

    agent = "entropy size status"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.get_entropy',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    failed_minions = []
    verified_minions = []

    for minion in out:
        verified_minions.append(minion)
        entropy = int(out[minion]['ret'])
        if entropy < minimum_bits:
            if minion not in failed_minions:
                failed_minions.append(minion)

    if not _failed_minions(out, agent, failed_minions):
        __context__['retcode'] = 2
        return False

    if kwargs.get("debug", False):
        logger.info(verified_minions)

    return True


def docker_registry_list(host):

    ''' Retrieve and list docker catalog '''

    try:
        if host[0:4] == 'http':
            url = host + '/v2/'
        else:
            url = 'http://' + host + '/v2/'
        repos = requests.get(url + '_catalog')

        versions = {}
        for repo in repos.json()['repositories']:
            repo_versions = requests.get(url + repo + '/tags/list')
            versions[repo] = repo_versions.json().pop('tags')
        return versions
    except:
        return {}

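# For reference: docker_registry_list() walks the Docker registry v2 API
# (/v2/_catalog and /v2/<repo>/tags/list) and returns {repository: [tags]};
# any connection or parsing error collapses to an empty dict.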

def docker_ps(list_all=0):

    ''' List docker containers via docker-py (similar to docker ps) '''

    import docker
    client = docker.client.Client(base_url='unix://var/run/docker.sock')
    return client.containers(all=list_all)


def zookeeper_cmd(cmd, hostname='localhost', port=2181):

    ''' Execute zookeeper cmd via socket '''

    buf_size = 1024
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((hostname, port))
    sock.sendall(cmd)
    sock.shutdown(socket.SHUT_WR)
    rdata = ""
    while 1:
        data = sock.recv(buf_size)
        if data == "":
            break
        rdata += data
    sock.close()
    return rdata

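# Usage sketch: zookeeper_cmd() speaks the plain four-letter-word protocol,
# e.g. zookeeper_cmd('ruok') is expected to answer 'imok' on a healthy node;
# zookeeper_stats() below combines the 'conf', 'envi', 'srvr' and 'cons'
# commands into a single structure.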

def zookeeper_stats():

    ''' Retrieve zookeeper stats '''

    stats = {}
    stats['configuration'] = {}
    for line in zookeeper_cmd('conf').split('\n'):
        if line:
            key, value = line.split('=')
            if value.strip().isdigit():
                value = int(value)
            else:
                value = value.strip()
            stats['configuration'][key.strip().lower().replace(' ', '_')] = value

    stats['environment'] = {}
    for line in zookeeper_cmd('envi').split('\n')[1:]:
        if line:
            key, value = line.split('=')
            if value.strip().isdigit():
                value = int(value)
            else:
                value = value.strip()
            stats['environment'][key.strip().lower().replace(' ', '_')] = value

    stats['server'] = {}
    for line in zookeeper_cmd('srvr').split('\n'):
        if line:
            if re.findall('^Zookeeper version:', line, flags=re.IGNORECASE):
                version_str = line.split(':')[1].strip()
                version = version_str
                if '-' in version_str:
                    version_str = version_str.split('-')[0]
                if '.' in version_str:
                    version = []
                    version_list = version_str.split('.')
                    for elem in version_list:
                        if elem.strip().isdigit():
                            version.append(int(elem))
                stats['server']['version'] = version
                continue
            if re.findall('^Latency min/avg/max:', line, flags=re.IGNORECASE):
                latency_min, latency_avg, latency_max = line.split(':')[1].strip().split('/')
                stats['server']['latency'] = {'min':int(latency_min),'max':int(latency_max),'avg':int(latency_avg)}
                continue
            key, value = line.split(':')
            if value.strip().isdigit():
                value = int(value)
            else:
                value = value.strip()
            stats['server'][key.strip().lower().replace(' ', '_')] = value

    stats['clients'] = {}
    for line in zookeeper_cmd('cons').split('\n'):
        if line:
            clients = re.findall('^(\s*\/)(.+)(:\d+\[\d+\])(\(.+\))$', line)[0][1:]
            addr = clients[0]
            port, direction = re.findall('^(\d+)\[(\d+)\]$', clients[1][1:])[0]
            client = '['+addr+']:'+str(port)
            stats['clients'][client] = {'direction': int(direction)}
            for elem in clients[2][1:-1].split(','):
                key, value = elem.split('=')
                if value.strip().isdigit():
                    value = int(value)
                else:
                    value = value.strip()
                stats['clients'][client][key.strip().lower().replace(' ', '_')] = value

    return stats


def get_zookeeper_leader(target='I@opencontrail:control', target_type='compound', ignore_dead=False, **kwargs):

    ''' Retrieve zookeeper leader '''

    agent = "zookeeper leader retrieve"
    out = __salt__['saltutil.cmd']( tgt=target,
                                    tgt_type=target_type,
                                    fun='health_checks.zookeeper_stats',
                                    timeout=3
                                  ) or None

    if not _minions_output(out, agent, ignore_dead):
        __context__['retcode'] = 2
        return False

    leader = None
    for minion in out:
        zookeeper_mode = out[minion]['ret']['server']['mode']

        if zookeeper_mode == 'leader':
            leader = minion

    return leader


def contrail_vrouter_list(api_host='127.0.0.1', api_port=9100):

    ''' Retrieve and list contrail vrouters.
        Valid targets: Contrail controllers.
    '''

    try:
        if api_host[0:4] == 'http':
            url = api_host + ':' + str(api_port)
        else:
            url = 'http://' + api_host + ':' + str(api_port)

        vrouters = requests.get(url + '/virtual-routers').json()
        vrouter_list = []
        for vr in vrouters['virtual-routers']:
            vr_uuid = vr['uuid']
            for name in vr['fq_name']:
                if name == "default-global-system-config":
                    continue
                else:
                    vr_name = name
            vrouter_list.append({'name': vr_name, 'uuid': vr_uuid})
        return vrouter_list

    except:
        return {}


def contrail_vrouter_show(vr_uuid, api_host='127.0.0.1', api_port=9100):

    ''' Retrieve contrail vrouter data
        Valid targets: Contrail controllers.
    '''

    try:
        if api_host[0:4] == 'http':
            url = api_host + ':' + str(api_port)
        else:
            url = 'http://' + api_host + ':' + str(api_port)

        return requests.get(url + '/virtual-router/' + vr_uuid).json()

    except:
        return {}


def _xmletree_descend_child(given_child, tag_requested):

    ''' Returns xmletree subelement by tag name '''

    my_child = {}

    for child in given_child:
        if child.tag == tag_requested:
            my_child = child
            break

    return my_child


def contrail_vrouter_agent_status(api_host='127.0.0.1', api_port=8085):

    ''' Retrieve contrail vrouter agent status '''

    import xml.etree.ElementTree as ET

    if api_host[0:4] == 'http':
        url = api_host + ':' + str(api_port)
    else:
        url = 'http://' + api_host + ':' + str(api_port)

    try:
        req = requests.get(url + '/Snh_SandeshUVECacheReq?x=NodeStatus')
        if int(req.status_code) != 200:
            return "Could not fetch data from vrouter agent via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text))
    except:
        pass

    try:
        xmletree = ET.fromstring(req.text)
    except:
        return "Could not parse xml tree %s" % str(req.text)

    try:
        vrouter_data = {}
        child = _xmletree_descend_child(xmletree, 'NodeStatusUVE')
        child = _xmletree_descend_child(child, 'data')
        child = _xmletree_descend_child(child, 'NodeStatus')
        child = _xmletree_descend_child(child, 'process_status')
        child = _xmletree_descend_child(child, 'list')
        child = _xmletree_descend_child(child, 'ProcessStatus')
        vrouter_data['state'] = _xmletree_descend_child(child, 'state').text
        vrouter_data['connections'] = []
        child = _xmletree_descend_child(child, 'connection_infos')
        for elem in _xmletree_descend_child(child, 'list'):
            conn = {}
            conn['type'] = _xmletree_descend_child(elem,'type').text
            conn['name'] = _xmletree_descend_child(elem,'name').text
            conn['status'] = _xmletree_descend_child(elem,'status').text
            conn['description'] = _xmletree_descend_child(elem,'description').text
            conn['server_addrs'] = []
            server_addrs = _xmletree_descend_child(elem,'server_addrs')
            for srv in _xmletree_descend_child(server_addrs,'list'):
                host, port = srv.text.split(':')
                conn['server_addrs'].append({'host': host, 'port': port})
            vrouter_data['connections'].append(conn)
        return vrouter_data
    except:
        return "Unsupported xml tree for this function %s" % str(req.text)


def contrail_collector_agent_status(vr_name, api_host='auto', api_port=9081):

    ''' Retrieve contrail vrouter agent status from analyticsdb '''

    if api_host[0:4] == 'http':
        url = api_host + ':' + str(api_port)
    elif api_host == 'auto':
        my_ip = __salt__['pillar.get']('_param:opencontrail_analytics_address')
        url = 'http://' + my_ip + ':' + str(api_port)
    else:
        url = 'http://' + api_host + ':' + str(api_port)

    req = requests.get(url + '/analytics/uves/vrouter/' + vr_name + '?flat')
    if int(req.status_code) != 200:
        return "Could not fetch data from vrouter agent via %s.\nGot bad status code: %s\n%s" % (url, str(req.status_code), str(req.text))

    return json.loads(req.text)


def _get_object(json_obj, obj_path):

    ''' Retrieve a subelement of a JSON object or a value '''

    if ':' in obj_path:
        splitter = obj_path.split(':')
        k = splitter[0]
        v = ':'.join(splitter[1:])
        if k.isdigit():
            # Return specific element path
            return [ _get_object(json_obj[int(k)], v) ]
        elif k == '*':
            l = []
            for el in json_obj:
                l.append(_get_object(el, v))
            # Return all list elements from the path
            return l
        else:
            # Contrail output may have nested JSON
            if isinstance(json_obj, str) or isinstance(json_obj, unicode):
                json_obj = json.loads(json_obj)
            # Assume list. Return it
            return { k: _get_object(json_obj[k], v) }
    else:
        return { obj_path: json_obj[obj_path] }

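# Illustration (hypothetical document, not from a live cluster): given
#   sample = {'xmpp_peer_list': [{'ip': '10.0.0.1', 'status': 'Established'},
#                                {'ip': '10.0.0.2', 'status': 'Established'}]}
# the path 'xmpp_peer_list:*:status' from the filter map resolves to
#   {'xmpp_peer_list': [{'status': 'Established'}, {'status': 'Established'}]}
# i.e. '*' fans out over list elements and each colon descends one key.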

def _deepmerge(o1, o2):

    ''' Deep merge JSON objects '''

    o3 = {}
    if type(o1) == type(o2):
        if type(o1) == dict or type(o1) == tuple:
            for k in set(o1.keys() + o2.keys()):
                if k in o1:
                    if k in o2:
                        o3[k] = _deepmerge(o1[k], o2[k])
                    else:
                        o3[k] = o1[k]
                else:
                    o3[k] = o2[k]
        elif type(o1) == list or type(o1) == set:
            o3 = [] + o2
            for el in o3:
                i = o3.index(el)
                o3[i] = _deepmerge(o1[i], o2[i])
        else:
            o3 = o2
    else:
        o3 = o2

    return o3

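# Illustration (hypothetical values): _deepmerge({'a': {'b': 1}}, {'a': {'c': 2}})
# yields {'a': {'b': 1, 'c': 2}}; contrail_vrouter_agent_info() below uses this
# to accumulate the per-path fragments produced by _get_object() into a single
# filtered document.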

def contrail_vrouter_agent_info(vr_name, filter_map=default_vrouter_info_map):

    ''' Retrieve filtered contrail vrouter agent info from analyticsdb '''

    vr_agent_status = contrail_collector_agent_status(vr_name)
    vr_info = {}
    for conf in filter_map:
        vr_info[conf] = {}
        for el_path in filter_map[conf]:
            vr_info = _deepmerge(vr_info, { conf: _get_object(vr_agent_status[conf], el_path) } )

    return vr_info


def libvirt_capabilities():

    ''' JSON formatted libvirt capabilities list '''

    import xml.etree.ElementTree as ET

    try:
        proc = subprocess.Popen(['virsh', 'capabilities'], stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        xmletree = ET.fromstring(stdout)
    except:
        return "Could not parse xml tree %s" % str(stdout)

    try:
        capabilities = {}
        for elem in xmletree:
            if elem.tag == "guest":
                for el in elem:
                    if el.tag == 'arch':
                        _name = el.attrib['name']
                        capabilities[_name] = []
                        for arch in el:
                            if arch.tag == 'machine':
                                if 'canonical' not in arch.attrib:
                                    capabilities[_name].append(arch.text)

        return capabilities
    except:
        return "Unsupported xml tree for this function %s" % str(stdout)