| # Author: Alex Savatieiev (osavatieiev@mirantis.com; a.savex@gmail.com) |
| # Copyright 2019-2022 Mirantis, Inc. |
| import ipaddress |
| import json |
| |
| from cfg_checker.common import logger_cli |
| from cfg_checker.helpers.console_utils import Progress |
| |
| |
| # Independent class that consumes node data (salt.nodes-style) provided by the mapper
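| # The base class builds per-node packet lists, parses ping.py output and prints
| # summaries; subclasses (SaltNetworkPinger, KubeNetworkPinger) implement ping_nodes()
| # for their respective environments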
| class NetworkPinger(object): |
| def __init__( |
| self, |
| mapper, |
| detailed=False, |
| skip_list=None, |
| skip_list_file=None |
| ): |
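| # NOTE: skip_list and skip_list_file are accepted, apparently to keep the
| # constructor signature uniform across pingers, but are not used by this base class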
| logger_cli.info("# Initializing Pinger") |
| self.mapper = mapper |
| # store only the run options here; actual pinging happens in ping_nodes()
| self.detailed_summary = detailed |
| self.results = {} |
| |
| def _collect_node_addresses(self, target_net): |
| # use the reclass model and standard mapper methods to build the
| # list of nodes that have addresses in the target network
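| # Returned mapping (as consumed below), roughly:
| # {hostname: [{'name': <if_name>, 'ifs': [IPv4Interface, ...], 'mtu': <int>}, ...]}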
| _networks = self.mapper.map_network(self.mapper.RUNTIME) |
| if target_net in _networks: |
| return _networks[target_net] |
| else: |
| logger_cli.info( |
| "# Target network of {} not found after mapping".format( |
| target_net.exploded |
| ) |
| ) |
| return None |
| |
| def _get_packets_data(self, network_cidr_str, mtu): |
| # Build the per-node ping packet lists for the given network CIDR
| logger_cli.debug("... collecting node pairs") |
| _fake_if = ipaddress.IPv4Interface(str(network_cidr_str)) |
| _net = _fake_if.network |
| # collect nodes and ips from reclass |
| nodes = self._collect_node_addresses(_net) |
| # build list of packets to be sent |
| # source -> target |
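| # _packets maps each source host to its outgoing packet list:
| # {src_host: {'ip': ..., 'if_name': ..., 'targets': {tgt_host: [<packet dict>, ...]}}}
| # where <packet dict> carries the target 'ip', 'if_name', 'mtu' and derived 'size'.
| # Note: only the first interface record of each source node is used as the source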
| _count = 0 |
| _packets = {} |
| _nodes = sorted(nodes.keys()) |
| _nodes_total = len(_nodes) |
| logger_cli.info("-> {} nodes found within subnet of '{}'".format( |
| _nodes_total, |
| network_cidr_str |
| )) |
| while len(_nodes) > 0: |
| src_host = _nodes.pop() |
| src_data = nodes[src_host] |
| src_if_name = src_data[0]['name'] |
| src_ips = [str(_if.ip) for _if in src_data[0]['ifs']] |
| _packets[src_host] = { |
| "ip": src_ips[0], |
| "if_name": src_if_name, |
| "targets": {} |
| } |
| |
| for tgt_host, tgt_data in nodes.items(): |
| _t = _packets[src_host]["targets"] |
| for tgt_if in tgt_data: |
| tgt_if_name = tgt_if['name'] |
| _ip_index = 0 |
| for tgt_ip in tgt_if['ifs']: |
| _ip = str(tgt_ip.ip) |
| if _ip not in src_ips: |
| if tgt_host not in _t: |
| _t[tgt_host] = [] |
| # Handling mtu and packet size |
| if mtu == 0: |
| # Detect MTU |
| _mtu = int(tgt_if['mtu']) |
| else: |
| _mtu = mtu |
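| # ICMP payload size = MTU - 20 (IPv4 header) - 8 (ICMP header)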
| _packet_size = (int(_mtu) - 20 - 8) if _mtu != 0 else 0
| _tgt = { |
| "ip": _ip, |
| "tgt_host": tgt_host, |
| "ip_index": _ip_index, |
| "if_name": tgt_if_name, |
| "mtu": _mtu, |
| "size": _packet_size |
| } |
| _t[tgt_host].append( |
| _tgt |
| ) |
| _count += 1 |
| _ip_index += 1 |
| logger_cli.info("-> {} packets to send".format(_count)) |
| |
| if not _count: |
| logger_cli.warning( |
| "\n# WARNING: No packets to send for '{}', " |
| "check network configuration\n".format(network_cidr_str) |
| ) |
| |
| return None, _packets, _nodes_total |
| else: |
| return _count, _packets, _nodes_total |
| |
| def _process_result(self, src, src_data, result): |
| # Parse the JSON output produced by ping.py on the source node
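| # ping.py is expected to return JSON shaped roughly as:
| # {tgt_host: [{'returncode': int, 'if_name': str, 'ip': str,
| #              'stdout': str, 'stderr': str}, ...]}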
| try: |
| _result = json.loads(result) |
| except (ValueError, TypeError): |
| self.mapper.errors.add_error( |
| self.mapper.errors.NET_NODE_NON_RESPONSIVE, |
| node=src, |
| response=result |
| ) |
| |
| _msg = "# ERROR: Unexpected return for '{}': '{}'\n".format( |
| src, |
| result |
| ) |
| |
| return False, _msg |
| |
| # Handle return codes |
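| # returncode mapping used below: 0 = success, 1 = timeout / packet loss,
| # 68 = name resolution failure, any other value >1 = no response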
| for tgt_node, _tgt_ips in _result.items(): |
| for _params in _tgt_ips: |
| _body = "{}({}) --{}--> {}({}@{})\n".format( |
| src, |
| src_data["if_name"], |
| _params["returncode"], |
| tgt_node, |
| _params["if_name"], |
| _params["ip"] |
| ) |
| _stdout = "" |
| _stderr = "" |
| if len(_params["stdout"]) > 0: |
| _stdout = "stdout:\n{}\n".format(_params["stdout"]) |
| if len(_params["stderr"]) > 0: |
| _stderr = "stderr:\n{}\n".format(_params["stderr"]) |
| |
| if not _params["returncode"]: |
| # returncode 0: ping succeeded
| self.mapper.errors.add_error( |
| self.mapper.errors.NET_PING_SUCCESS, |
| ping_path=_body, |
| stdout=_stdout, |
| stderr=_stderr |
| ) |
| elif _params["returncode"] == 68: |
| # returncode 68: 'cannot resolve host' error
| self.mapper.errors.add_error( |
| self.mapper.errors.NET_PING_NOT_RESOLVED, |
| ping_path=_body, |
| stdout=_stdout, |
| stderr=_stderr |
| ) |
| elif _params["returncode"] > 1: |
| # returncode >1: no response received at all
| self.mapper.errors.add_error( |
| self.mapper.errors.NET_PING_ERROR, |
| ping_path=_body, |
| stdout=_stdout, |
| stderr=_stderr |
| ) |
| else: |
| # returncode 1: timeout and/or packet loss
| self.mapper.errors.add_error( |
| self.mapper.errors.NET_PING_TIMEOUT, |
| ping_path=_body, |
| stdout=_stdout, |
| stderr=_stderr |
| ) |
| return True, _result |
| |
| def print_summary(self, cidr, data): |
| # Print a per-node summary of node-to-node ping results
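| # Example of the output format (illustrative values):
| #   PASS: 3/4 nodes from cmp001 (ens3:10.0.0.1) with MTU 1500
| #       FAIL: cmp002 (ens3:10.0.0.2) with 1472/1500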
| logger_cli.info("\n-> {}, {} nodes".format(cidr, len(data))) |
| # iterate nodes |
| for _n, _d in data.items(): |
| # targets |
| _total = len(_d['targets']) |
| _fail = [] |
| _pass = [] |
| _mtus = [] |
| for _f, _t in _d['targets'].items(): |
| # filter data |
| _fail += [[_f, _l] for _l in _t if _l['returncode']] |
| _pass += [[_f, _l] for _l in _t if not _l['returncode']] |
| _mtus += [str(_l['mtu']) for _l in _t] |
| # summary line for this source node
| logger_cli.info( |
| " PASS: {}/{} nodes from {} ({}:{}) with MTU {}".format( |
| len(_pass), |
| _total, |
| _n, |
| _d['if_name'], |
| _d['ip'], |
| ",".join(list(set(_mtus))) |
| ) |
| ) |
| # print fails |
| for node, _dict in _fail: |
| logger_cli.info( |
| "\tFAIL: {} ({}:{}) with {}/{}".format( |
| node, |
| _dict['if_name'], |
| _dict['ip'], |
| _dict['size'], |
| _dict['mtu'] |
| ) |
| ) |
| |
| # logger_cli.info(self.mapper.errors.get_summary(print_zeros=False)) |
| return |
| |
| def print_details(self, cidr, data): |
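| # Print detailed per-target PASS/FAIL lines, including ping stdout/stderr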
| def _print_stds(stdout, stderr): |
| logger_cli.debug(" stdout:\n") |
| for line in stdout.splitlines(): |
| logger_cli.debug(" {}".format(line)) |
| if not stderr: |
| logger_cli.debug(" stderr: <empty>") |
| else: |
| logger_cli.debug(" stderr:\n") |
| for line in stderr.splitlines(): |
| logger_cli.debug(" {}".format(line)) |
| |
| logger_cli.info("\n---> {}, {} nodes".format(cidr, len(data))) |
| # iterate nodes |
| for _n, _d in data.items(): |
| # targets |
| _fail = [] |
| _pass = [] |
| for _f, _t in _d['targets'].items(): |
| # filter data |
| _fail += [[_f, _l] for _l in _t if _l['returncode']] |
| _pass += [[_f, _l] for _l in _t if not _l['returncode']] |
| # header line for this source node
| logger_cli.info( |
| "======= from {} ({}:{})".format( |
| _n, |
| _d['if_name'], |
| _d['ip'] |
| ) |
| ) |
| for node, _dict in _pass: |
| logger_cli.info( |
| " + PASS: to {} ({}:{}) with {}/{}".format( |
| node, |
| _dict['if_name'], |
| _dict['ip'], |
| _dict['size'], |
| _dict['mtu'] |
| ) |
| ) |
| _print_stds(_dict['stdout'], _dict['stderr']) |
| # print fails |
| for node, _dict in _fail: |
| logger_cli.info( |
| " - FAIL: to {} ({}:{}) with {}/{}".format( |
| node, |
| _dict['if_name'], |
| _dict['ip'], |
| _dict['size'], |
| _dict['mtu'] |
| ) |
| ) |
| _print_stds(_dict['stdout'], _dict['stderr']) |
| |
| # Detailed errors |
| # logger_cli.info( |
| # "\n{}\n".format( |
| # self.mapper.errors.get_errors() |
| # ) |
| # ) |
| return |
| |
| |
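| # Pinger that prepares ping.py on the nodes and runs it through the Salt master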
| class SaltNetworkPinger(NetworkPinger): |
| def __init__( |
| self, |
| mapper, |
| detailed=False, |
| skip_list=None, |
| skip_list_file=None |
| ): |
| super(SaltNetworkPinger, self).__init__( |
| mapper, |
| detailed=detailed, |
| skip_list=skip_list, |
| skip_list_file=skip_list_file |
| ) |
| |
| def ping_nodes(self, network_cidr_str, mtu): |
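| # Flow: build the packet lists, upload 'targets.json' to each source node via
| # the master, execute ping.py there and parse the results back into _packets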
| # get packets |
| _count, _packets, _nodes_total = self._get_packets_data( |
| network_cidr_str, |
| mtu |
| ) |
| if not _count: |
| return None |
| |
| # send the ping packets
| logger_cli.info( |
| "-> Pinging nodes in {}".format(network_cidr_str)) |
| self.mapper.master.prepare_script_on_active_nodes("ping.py") |
| _progress = Progress(_count) |
| _progress_index = 0 |
| _node_index = 0 |
| for src, src_data in _packets.items(): |
| _targets = src_data["targets"] |
| _node_index += 1 |
| # create 'targets.json' on source host |
| _path = self.mapper.master.prepare_json_on_node( |
| src, |
| _targets, |
| "targets.json" |
| ) |
| # execute ping.py |
| _results = self.mapper.master.execute_script_on_node( |
| src, |
| "ping.py", |
| args=[_path] |
| ) |
| _progress_index += len(_targets) |
| # print progress |
| _progress.write_progress( |
| _progress_index, |
| note='/ {}/{} nodes / current {}'.format( |
| _node_index, |
| _nodes_total, |
| src |
| ) |
| ) |
| # Parse results back in place |
| _ret_code, _data = self._process_result( |
| src, src_data, |
| _results[src] |
| ) |
| if not _ret_code: |
| _progress.clearline() |
| logger_cli.error(_data) |
| continue |
| else: |
| src_data["targets"] = _data |
| |
| _progress.end() |
| |
| return _packets |
| |
| |
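| # Pinger that runs ping.py inside the per-node daemonset pods (Kubernetes)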
| class KubeNetworkPinger(NetworkPinger): |
| def __init__( |
| self, |
| mapper, |
| detailed=False, |
| skip_list=None, |
| skip_list_file=None |
| ): |
| super(KubeNetworkPinger, self).__init__( |
| mapper, |
| detailed=detailed, |
| skip_list=skip_list, |
| skip_list_file=skip_list_file |
| ) |
| |
| def ping_nodes(self, network_cidr_str, mtu): |
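| # Flow: build the packet lists, find the daemonset pod on each source node,
| # upload 'targets.json' into that pod, execute ping.py there and parse the results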
| # get packets |
| _count, _packets, _nodes_total = self._get_packets_data( |
| network_cidr_str, |
| mtu |
| ) |
| if not _count: |
| return None |
| |
| # send the ping packets
| logger_cli.info( |
| "-> Pinging nodes in {}".format(network_cidr_str)) |
| _progress = Progress(_count) |
| _progress_index = 0 |
| _node_index = 0 |
| for src, src_data in _packets.items(): |
| _targets = src_data["targets"] |
| _node_index += 1 |
| # create 'targets.json' inside the daemonset pod on the source node
| _ds = self.mapper.get_daemonset() |
| _pname = self.mapper.master.get_pod_name_in_daemonset_by_node( |
| src, |
| _ds |
| ) |
| _path = self.mapper.master.prepare_json_in_pod( |
| _pname, |
| _ds.metadata.namespace, |
| _targets, |
| "targets.json" |
| ) |
| # execute ping.py |
| _result = self.mapper.master.exec_script_on_target_pod( |
| _pname, |
| "ping.py", |
| args=[_path] |
| ) |
| _progress_index += len(_targets) |
| # print progress |
| _progress.write_progress( |
| _progress_index, |
| note='/ {}/{} nodes / current {}'.format( |
| _node_index, |
| _nodes_total, |
| src |
| ) |
| ) |
| # Parse results back in place |
| _ret_code, _data = self._process_result( |
| src, src_data, |
| _result |
| ) |
| if not _ret_code: |
| _progress.clearline() |
| logger_cli.error(_data) |
| continue |
| else: |
| src_data["targets"] = _data |
| |
| _progress.end() |
| |
| return _packets |