Updated ping command to work with MCC/MOS

  - updated Pinger class with inherited structure for Salt and Kube
  - implemented DaemonSet handling in the KubeApi interface
  - implemented put-textfile and a series of ConfigMap methods in KubeApi
  - updated Pinger to accept multiple --cidr options at once (example below)
  - updated Summary section to be more informative and human-readable
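
  Example invocation (illustrative; the CIDR values are placeholders and the
  'mcp-check network ping' command path is inferred from the parser/help text):

    mcp-check network ping --cidr 10.0.0.0/24 --cidr 192.168.16.0/24 \
        --mtu auto --detailed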

Change-Id: Iac18a619d0bb9a36a286a07f38aeba8f99a454ca
Related-PROD: PROD-36603
diff --git a/cfg_checker/common/decorators.py b/cfg_checker/common/decorators.py
new file mode 100644
index 0000000..eed3fba
--- /dev/null
+++ b/cfg_checker/common/decorators.py
@@ -0,0 +1,60 @@
+# Author: https://gist.github.com/FBosler/be10229aba491a8c912e3a1543bbc74e
+# Updated to fit current framework by Alex Savatieiev
+from functools import wraps
+import time
+
+from cfg_checker.common import logger, logger_cli
+
+
+def retry(exceptions, total_tries=4, initial_wait=0.5, backoff_factor=2):
+    """
+    Retry calling the decorated function, applying an exponential
+    backoff between attempts.
+    Args:
+        exceptions: Exception(s) that trigger a retry; can be a tuple
+        total_tries: Total number of tries before giving up
+        initial_wait: Delay before the first retry, in seconds
+        backoff_factor: Backoff multiplier (e.g. a value of 2 will
+                        double the delay on each retry).
+    """
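+    # Illustrative usage sketch (ApiException is just an example of a
+    # retryable exception, as used in kube_utils; the names below are
+    # examples only):
+    #
+    #   @retry(ApiException, total_tries=3, initial_wait=1)
+    #   def list_pods(api, ns):
+    #       return api.list_namespaced_pod(ns)
+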
+    def retry_decorator(f):
+        @wraps(f)
+        def func_with_retries(*args, **kwargs):
+            _tries, _delay = total_tries + 1, initial_wait
+            while _tries > 1:
+                try:
+                    return f(*args, **kwargs)
+                except exceptions as e:
+                    _tries -= 1
+                    print_args = args if args else "no args"
+                    if _tries == 1:
+                        msg = "... {} failed after {} tries".format(
+                                f.__name__,
+                                total_tries
+                            )
+                        logger_cli.debug(msg)
+                        logger.debug(
+                            msg + "; args: {}, kwargs: {}".format(
+                                print_args,
+                                kwargs
+                            )
+                        )
+                        raise
+                    msg = "... {}; Exception: {}.\n" \
+                          "... retrying in {} seconds!".format(
+                              f.__name__,
+                              e,
+                              _delay
+                          )
+                    logger_cli.debug(msg)
+                    logger.debug(
+                        msg + " args: {}, kwargs: {}\n".format(
+                            print_args,
+                            kwargs
+                        )
+                    )
+                    time.sleep(_delay)
+                    _delay *= backoff_factor
+
+        return func_with_retries
+    return retry_decorator
diff --git a/cfg_checker/common/kube_utils.py b/cfg_checker/common/kube_utils.py
index 86e59a5..042db5d 100644
--- a/cfg_checker/common/kube_utils.py
+++ b/cfg_checker/common/kube_utils.py
@@ -8,14 +8,17 @@
 
 from kubernetes import client as kclient, config as kconfig
 from kubernetes.stream import stream
+from kubernetes.client.rest import ApiException
 
 from cfg_checker.common import logger, logger_cli
+from cfg_checker.common.decorators import retry
 from cfg_checker.common.exception import InvalidReturnException, KubeException
 from cfg_checker.common.file_utils import create_temp_file_with_content
 from cfg_checker.common.other import utils, shell
 from cfg_checker.common.ssh_utils import ssh_shell_p
 from cfg_checker.common.const import ENV_LOCAL
 
+
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 
 
@@ -208,7 +211,10 @@
     @property
     def CoreV1(self):
         if not self._coreV1:
-            self._coreV1 = kclient.CoreV1Api(self.kApi)
+            if self.is_local:
+                self._coreV1 = kclient.CoreV1Api(kclient.ApiClient())
+            else:
+                self._coreV1 = kclient.CoreV1Api(kclient.ApiClient(self.kConf))
         return self._coreV1
 
     @property
@@ -306,7 +312,7 @@
                 "... searching for pods with the name '{}'".format(pod_name)
             )
             _pods = {}
-            _pods = self._coreV1.list_namespaced_pod(namespace)
+            _pods = self.CoreV1.list_namespaced_pod(namespace)
             _names = self._get_listed_attrs(_pods.items, "metadata.name")
             _pname = ""
             _pnames = [n for n in _names if n.startswith(pod_name)]
@@ -334,11 +340,12 @@
         # If not, output gets converted to str
         # Which causes to change " to '
         # After that json.loads(...) fail
+        cmd = cmd if isinstance(cmd, list) else cmd.split()
         _pod_stream = stream(
             self.CoreV1.connect_get_namespaced_pod_exec,
             _pname,
             namespace,
-            command=cmd.split(),
+            command=cmd,
             stderr=True,
             stdin=False,
             stdout=True,
@@ -350,7 +357,11 @@
         # run for timeout
         _pod_stream.run_forever(timeout=_request_timeout)
         # read the output
-        return _pod_stream.read_stdout()
+        _output = _pod_stream.read_stdout()
+        # Force recreate of api objects
+        self._coreV1 = None
+        # Send output
+        return _output
 
     def ensure_namespace(self, ns):
         """
@@ -487,3 +498,71 @@
         # create result list
 
         return []
+
+    @retry(ApiException)
+    def get_pods_for_daemonset(self, ds):
+        # get all pod names for daemonset
+        logger_cli.debug(
+            "... extracting pod names from daemonset '{}'".format(
+                ds.metadata.name
+            )
+        )
+        _ns = ds.metadata.namespace
+        _name = ds.metadata.name
+        _pods = self.CoreV1.list_namespaced_pod(
+            namespace=_ns,
+            label_selector='name={}'.format(_name)
+        )
+        return _pods
+
+    def put_string_buffer_to_pod_as_textfile(
+        self,
+        pod_name,
+        namespace,
+        buffer,
+        filepath,
+        _request_timeout=120,
+        **kwargs
+    ):
+        _command = ['/bin/sh']
+        response = stream(
+            self.CoreV1.connect_get_namespaced_pod_exec,
+            pod_name,
+            namespace,
+            command=_command,
+            stderr=True,
+            stdin=True,
+            stdout=True,
+            tty=False,
+            _request_timeout=_request_timeout,
+            _preload_content=False,
+            **kwargs
+        )
+
+        # note: if passing a dict, serialize it to bytes first, e.g.
+        # buffer = json.dumps(_dict, indent=2).encode('utf-8')
+
+        commands = [
+            bytes("cat <<'EOF' >" + filepath + "\n", 'utf-8'),
+            buffer,
+            bytes("\n" + "EOF\n", 'utf-8')
+        ]
+
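+        # The buffer is delivered by streaming a shell heredoc over the
+        # pod's stdin: 'cat' reads everything between the EOF markers and
+        # writes it to 'filepath' inside the container.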
+        while response.is_open():
+            response.update(timeout=1)
+            if response.peek_stdout():
+                logger_cli.debug("... STDOUT: %s" % response.read_stdout())
+            if response.peek_stderr():
+                logger_cli.debug("... STDERR: %s" % response.read_stderr())
+            if commands:
+                c = commands.pop(0)
+                logger_cli.debug("... running command... {}\n".format(c))
+                response.write_stdin(str(c, encoding='utf-8'))
+            else:
+                break
+        response.close()
+
+        # Force recreate of Api objects
+        self._coreV1 = None
+
+        return
diff --git a/cfg_checker/common/salt_utils.py b/cfg_checker/common/salt_utils.py
index f7ea50b..08f2d2b 100644
--- a/cfg_checker/common/salt_utils.py
+++ b/cfg_checker/common/salt_utils.py
@@ -41,8 +41,7 @@
             username=config.ssh_user,
             keypath=config.ssh_key,
             piped=False,
-            use_sudo=config.ssh_uses_sudo,
-            silent=True
+            use_sudo=config.ssh_uses_sudo
         )
         if len(_result) < 1:
             raise InvalidReturnException(
diff --git a/cfg_checker/common/settings.py b/cfg_checker/common/settings.py
index 993f92f..7b79354 100644
--- a/cfg_checker/common/settings.py
+++ b/cfg_checker/common/settings.py
@@ -327,7 +327,8 @@
                 self.kube_config_detected = True
             else:
                 logger_cli.debug("... KUBECONFIG env var not found")
-                self.kube_config_path = None
+                # do not change it from default
+                # self.kube_config_path = None
                 self.kube_config_detected = False
         else:
             logger_cli.debug(
@@ -339,7 +340,7 @@
 
         # try to load values from KUBECONF
         _kube_conf = None
-        if self.kube_config_path:
+        if self.kube_config_path and self.kube_config_detected:
             with open(self.kube_config_path) as kF:
                 _kube_conf = yaml.load(kF, Loader=yaml.SafeLoader)
             # extract host ip
diff --git a/cfg_checker/modules/network/__init__.py b/cfg_checker/modules/network/__init__.py
index 714c170..4c95ef3 100644
--- a/cfg_checker/modules/network/__init__.py
+++ b/cfg_checker/modules/network/__init__.py
@@ -16,11 +16,15 @@
             _class = checker.SaltNetworkChecker
         elif strClassHint == "mapper":
             _class = mapper.SaltNetworkMapper
+        elif strClassHint == "pinger":
+            _class = pinger.SaltNetworkPinger
     elif _env == ENV_TYPE_KUBE:
         if strClassHint == "checker":
             _class = checker.KubeNetworkChecker
         elif strClassHint == "mapper":
             _class = mapper.KubeNetworkMapper
+        elif strClassHint == "pinger":
+            _class = pinger.KubeNetworkPinger
     if not _class:
         raise CheckerException(
             "Unknown hint for selecting Network handler Class: '{}'".format(
@@ -73,12 +77,13 @@
     net_ping_parser.add_argument(
         '--cidr',
         metavar='network_ping_cidr',
-        help="Subnet CIDR to ping nodes in"
+        action="append",
+        help="Subnet CIDR to ping nodes in. Can be used multiple times"
     )
     net_ping_parser.add_argument(
         '--mtu',
-        metavar='network_ping_mtu',
-        help="MTU size to use. Ping will be done for MTU - 20 - 8"
+        metavar='network_ping_mtu', default=64,
+        help="MTU size to use, or 'auto' to use the network's MTU. "
+             "Ping payload size will be MTU - 20 - 8. Default: 64"
     )
     net_ping_parser.add_argument(
         '--detailed',
@@ -240,12 +245,55 @@
     # Checks if selected nodes are pingable
     # with a desireble parameters: MTU, Frame, etc
     # Check if there is supported env found
-    _env = args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+    _env = args_utils.check_supported_env(
+        [ENV_TYPE_SALT, ENV_TYPE_KUBE],
+        args,
+        config
+    )
     # Start command
+    # prepare parameters
     if not args.cidr:
         logger_cli.error("\n# Use mcp-check network list to get list of CIDRs")
     _cidr = args_utils.get_arg(args, "cidr")
     _skip, _skip_file = args_utils.get_skip_args(args)
+    _mtu = args_utils.get_arg(args, "mtu")
+    if isinstance(_mtu, str):
+        if _mtu == "auto":
+            _mtu = 0
+        elif _mtu.isdigit():
+            # a numeric MTU given on the command line arrives as a string
+            _mtu = int(_mtu)
+        else:
+            raise CheckerException(
+                "Bad MTU value supplied: '{}'.\n"
+                "Allowed values are: 'auto'/0 (autodetect), 64 - 9100".format(
+                    _mtu
+                )
+            )
+    if _mtu < 0:
+        raise CheckerException(
+            "Negative MTU values not supported: '{}'".format(_mtu)
+        )
+    elif _mtu == 0:
+        logger_cli.info(
+            "# MTU is 'auto'/0. Network's MTU value will be used."
+        )
+    elif _mtu < 64:
+        logger_cli.warn(
+            "WARNING: Supplied MTU value is low: '{}'. "
+            "Defaulting to '64'".format(_mtu)
+        )
+        _mtu = 64
+    elif _mtu > 9100:
+        logger_cli.warn(
+            "WARNING: Supplied MTU value is >9100: '{}'. "
+            "Defaulting to '9100'".format(_mtu)
+        )
+        _mtu = 9100
+
     # init mapper
     _skip, _skip_file = args_utils.get_skip_args(args)
     _class = _selectClass(_env, strClassHint="mapper")
@@ -255,31 +303,38 @@
         skip_list_file=_skip_file
     )
     # init pinger
-    _pinger = pinger.NetworkPinger(
+    _class = _selectClass(_env, strClassHint="pinger")
+    _pinger = _class(
         _mapper,
-        mtu=args.mtu,
         detailed=args.detailed,
         skip_list=_skip,
         skip_list_file=_skip_file
     )
+    # Iterate networks to be checked
+    logger_cli.info("# Checking {} networks".format(len(_cidr)))
+    _results = {}
+    for _t_cidr in _cidr:
+        logger_cli.info("-> '{}'".format(_t_cidr))
+        _results[_t_cidr] = _pinger.ping_nodes(_t_cidr, _mtu)
 
-    _ret = _pinger.ping_nodes(_cidr)
+    logger_cli.info("\n# Summary")
+    _dataset = []
+    for _cidr, _data in _results.items():
+        if not _data:
+            # nothing collected for this cidr, no iterations to save
+            _dataset.append(False)
+        else:
+            _dataset.append(True)
+            # print a report for cidr
+            if args.detailed:
+                # if set, print details
+                _pinger.print_details(_cidr, _data)
+            else:
+                _pinger.print_summary(_cidr, _data)
 
-    if _ret < 0:
-        # no need to save the iterations and summary
-        return
-    else:
+    if any(_dataset):
         # save what was collected
         _pinger.mapper.errors.save_iteration_data()
-
-        # print a report
-        _pinger.print_summary()
-
-        # if set, print details
-        if args.detailed:
-            _pinger.print_details()
-
-        return
+    return
 
 
 def do_trace(args, config):
diff --git a/cfg_checker/modules/network/mapper.py b/cfg_checker/modules/network/mapper.py
index f0fd78d..875f633 100644
--- a/cfg_checker/modules/network/mapper.py
+++ b/cfg_checker/modules/network/mapper.py
@@ -769,6 +769,7 @@
         skip_list_file=None
     ):
         self.master = KubeNodes(config)
+        self.daemonset = None
         super(KubeNetworkMapper, self).__init__(
             config,
             errors_class=errors_class,
@@ -776,22 +777,25 @@
             skip_list_file=skip_list_file
         )
 
+    def get_daemonset(self):
+        if not self.daemonset:
+            _d = self.master.prepare_daemonset("daemonset_template.yaml")
+
+            # wait for daemonset; normally less than 60 sec for all,
+            # but still, let us give it 10 seconds per pod
+            _timeout = len(self.master.nodes) * 10
+            if not self.master.wait_for_daemonset(_d, timeout=_timeout):
+                raise KubeException("Daemonset deployment fail")
+            self.daemonset = _d
+        return self.daemonset
+
     def get_script_output(self, script, args=None):
         """
         Get runtime network by creating DaemonSet with Host network parameter
         """
         # prepare daemonset
         logger_cli.info("-> Preparing daemonset to get node info")
-        _daemonset = self.master.prepare_daemonset(
-            "daemonset_template.yaml",
-            config_map=script
-        )
-
-        # wait for daemonset, normally less than 60 sec for all
-        # but still, let us give it 10 second per pod
-        _timeout = self.master.nodes.__len__() * 10
-        if not self.master.wait_for_daemonset(_daemonset, timeout=_timeout):
-            raise KubeException("Daemonset deployment fail")
+        _daemonset = self.get_daemonset()
         logger_cli.info("-> Running script on daemonset")
         # exec script on all pods in daemonset
         _result = self.master.execute_script_on_daemon_set(
@@ -801,7 +805,8 @@
         )
 
         # delete daemonset
-        self.master.delete_daemonset(_daemonset)
+        # TODO: handle daemonset delete properly
+        # self.master.delete_daemonset(_daemonset)
 
         return _result
 
@@ -810,9 +815,11 @@
         self.map_network(self.RUNTIME)
 
     def map_network(self, source):
+        # if this network source is already mapped, just return it
+        if source in self.networks:
+            return self.networks[source]
         # maps target network using given source
         _networks = None
-
         if source == self.RUNTIME:
             logger_cli.info("# Mapping node runtime network data")
             _r = self.get_script_output("ifs_data.py", args="json")
diff --git a/cfg_checker/modules/network/pinger.py b/cfg_checker/modules/network/pinger.py
index 30881da..04a5f68 100644
--- a/cfg_checker/modules/network/pinger.py
+++ b/cfg_checker/modules/network/pinger.py
@@ -10,36 +10,33 @@
     def __init__(
         self,
         mapper,
-        mtu=None,
         detailed=False,
         skip_list=None,
         skip_list_file=None
     ):
         logger_cli.info("# Initializing Pinger")
         self.mapper = mapper
-        # default MTU value
-        self.target_mtu = mtu if mtu else 64
         # only data
-        self.packet_size = int(self.target_mtu) - 20 - 8
         self.detailed_summary = detailed
+        self.results = {}
 
     def _collect_node_addresses(self, target_net):
         # use reclass model and standard methods
         # to create list of nodes with target network
-        _reclass = self.mapper.map_network(self.mapper.RUNTIME)
-        if target_net in _reclass:
-            return _reclass[target_net]
+        _networks = self.mapper.map_network(self.mapper.RUNTIME)
+        if target_net in _networks:
+            return _networks[target_net]
         else:
             logger_cli.info(
-                "# Target network of {} not found in reclass".format(
+                "# Target network of {} not found after mapping".format(
                     target_net.exploded
                 )
             )
             return None
 
-    def ping_nodes(self, network_cidr_str):
+    def _get_packets_data(self, network_cidr_str, mtu):
         # Conduct actual ping using network CIDR
-        logger_cli.info("# Collecting node pairs")
+        logger_cli.debug("... collecting node pairs")
         _fake_if = ipaddress.IPv4Interface(str(network_cidr_str))
         _net = _fake_if.network
         # collect nodes and ips from reclass
@@ -75,13 +72,20 @@
                         if _ip not in src_ips:
                             if tgt_host not in _t:
                                 _t[tgt_host] = []
+                            # Handling mtu and packet size
+                            if mtu == 0:
+                                # Detect MTU
+                                _mtu = int(tgt_if['mtu'])
+                            else:
+                                _mtu = mtu
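+                            # payload size is MTU minus 20 bytes of IPv4
+                            # header and 8 bytes of ICMP header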
+                            _packet_size = int(_mtu)-20-8 if _mtu != 0 else 0
                             _tgt = {
                                 "ip": _ip,
                                 "tgt_host": tgt_host,
                                 "ip_index": _ip_index,
                                 "if_name": tgt_if_name,
-                                "mtu": self.target_mtu,
-                                "size": self.packet_size
+                                "mtu": _mtu,
+                                "size": _packet_size
                             }
                             _t[tgt_host].append(
                                 _tgt
@@ -98,10 +102,213 @@
                 "check network configuration\n".format(network_cidr_str)
             )
 
-            return -1
+            return None, _packets, _nodes_total
+        else:
+            return _count, _packets, _nodes_total
+
+    def _process_result(self, src, src_data, result):
+        # Parse output
+        try:
+            _result = json.loads(result)
+        except (ValueError, TypeError):
+            self.mapper.errors.add_error(
+                self.mapper.errors.NET_NODE_NON_RESPONSIVE,
+                node=src,
+                response=result
+            )
+
+            _msg = "# ERROR: Unexpected return for '{}': '{}'\n".format(
+                src,
+                result
+            )
+
+            return False, _msg
+
+        # Handle return codes
+        for tgt_node, _tgt_ips in _result.items():
+            for _params in _tgt_ips:
+                _body = "{}({}) --{}--> {}({}@{})\n".format(
+                        src,
+                        src_data["if_name"],
+                        _params["returncode"],
+                        tgt_node,
+                        _params["if_name"],
+                        _params["ip"]
+                    )
+                _stdout = ""
+                _stderr = ""
+                if len(_params["stdout"]) > 0:
+                    _stdout = "stdout:\n{}\n".format(_params["stdout"])
+                if len(_params["stderr"]) > 0:
+                    _stderr = "stderr:\n{}\n".format(_params["stderr"])
+
+                if not _params["returncode"]:
+                    # 0
+                    self.mapper.errors.add_error(
+                        self.mapper.errors.NET_PING_SUCCESS,
+                        ping_path=_body,
+                        stdout=_stdout,
+                        stderr=_stderr
+                    )
+                elif _params["returncode"] == 68:
+                    # 68 is a 'can't resolve host' error
+                    self.mapper.errors.add_error(
+                        self.mapper.errors.NET_PING_NOT_RESOLVED,
+                        ping_path=_body,
+                        stdout=_stdout,
+                        stderr=_stderr
+                    )
+                elif _params["returncode"] > 1:
+                    # >1 means there was no response at all
+                    self.mapper.errors.add_error(
+                        self.mapper.errors.NET_PING_ERROR,
+                        ping_path=_body,
+                        stdout=_stdout,
+                        stderr=_stderr
+                    )
+                else:
+                    # 1 is for timeouts and/or packet loss
+                    self.mapper.errors.add_error(
+                        self.mapper.errors.NET_PING_TIMEOUT,
+                        ping_path=_body,
+                        stdout=_stdout,
+                        stderr=_stderr
+                    )
+        return True, _result
+
+    def print_summary(self, cidr, data):
+        # Print summary of ping activity in node-node perspective
+        logger_cli.info("\n-> {}, {} nodes".format(cidr, len(data)))
+        # iterate nodes
+        for _n, _d in data.items():
+            # targets
+            _total = len(_d['targets'])
+            _fail = []
+            _pass = []
+            _mtus = []
+            for _f, _t in _d['targets'].items():
+                # filter data
+                _fail += [[_f, _l] for _l in _t if _l['returncode']]
+                _pass += [[_f, _l] for _l in _t if not _l['returncode']]
+                _mtus += [str(_l['mtu']) for _l in _t]
+            # summary of passed
+            # source node
+            logger_cli.info(
+                "  PASS: {}/{} nodes from {} ({}:{}) with MTU {}".format(
+                    len(_pass),
+                    _total,
+                    _n,
+                    _d['if_name'],
+                    _d['ip'],
+                    ",".join(list(set(_mtus)))
+                )
+            )
+            # print fails
+            for node, _dict in _fail:
+                logger_cli.info(
+                    "\tFAIL: {} ({}:{}) with {}/{}".format(
+                        node,
+                        _dict['if_name'],
+                        _dict['ip'],
+                        _dict['size'],
+                        _dict['mtu']
+                    )
+                )
+
+        # logger_cli.info(self.mapper.errors.get_summary(print_zeros=False))
+        return
+
+    def print_details(self, cidr, data):
+        def _print_stds(stdout, stderr):
+            logger_cli.debug("    stdout:\n")
+            for line in stdout.splitlines():
+                logger_cli.debug("      {}".format(line))
+            if not stderr:
+                logger_cli.debug("    stderr: <empty>")
+            else:
+                logger_cli.debug("    stderr:\n")
+                for line in stderr.splitlines():
+                    logger_cli.debug("      {}".format(line))
+
+        logger_cli.info("\n---> {}, {} nodes".format(cidr, len(data)))
+        # iterate nodes
+        for _n, _d in data.items():
+            # targets
+            _fail = []
+            _pass = []
+            for _f, _t in _d['targets'].items():
+                # filter data
+                _fail += [[_f, _l] for _l in _t if _l['returncode']]
+                _pass += [[_f, _l] for _l in _t if not _l['returncode']]
+            # summary of passed
+            # source node
+            logger_cli.info(
+                "======= from {} ({}:{})".format(
+                    _n,
+                    _d['if_name'],
+                    _d['ip']
+                )
+            )
+            for node, _dict in _pass:
+                logger_cli.info(
+                    " + PASS: to {} ({}:{}) with {}/{}".format(
+                        node,
+                        _dict['if_name'],
+                        _dict['ip'],
+                        _dict['size'],
+                        _dict['mtu']
+                    )
+                )
+                _print_stds(_dict['stdout'], _dict['stderr'])
+            # print fails
+            for node, _dict in _fail:
+                logger_cli.info(
+                    " - FAIL: to {} ({}:{}) with {}/{}".format(
+                        node,
+                        _dict['if_name'],
+                        _dict['ip'],
+                        _dict['size'],
+                        _dict['mtu']
+                    )
+                )
+                _print_stds(_dict['stdout'], _dict['stderr'])
+
+        # Detailed errors
+        # logger_cli.info(
+        #     "\n{}\n".format(
+        #         self.mapper.errors.get_errors()
+        #     )
+        # )
+        return
+
+
+class SaltNetworkPinger(NetworkPinger):
+    def __init__(
+        self,
+        mapper,
+        detailed=False,
+        skip_list=None,
+        skip_list_file=None
+    ):
+        super(SaltNetworkPinger, self).__init__(
+            mapper,
+            detailed=detailed,
+            skip_list=skip_list,
+            skip_list_file=skip_list_file
+        )
+
+    def ping_nodes(self, network_cidr_str, mtu):
+        #  get packets
+        _count, _packets, _nodes_total = self._get_packets_data(
+            network_cidr_str,
+            mtu
+        )
+        if not _count:
+            return None
 
         # do ping of packets
-        logger_cli.info("# Pinging nodes: MTU={}".format(self.target_mtu))
+        logger_cli.info(
+            "-> Pinging nodes in {}".format(network_cidr_str))
         self.mapper.master.prepare_script_on_active_nodes("ping.py")
         _progress = Progress(_count)
         _progress_index = 0
@@ -131,89 +338,96 @@
                     src
                 )
             )
-            # Parse salt output
-            _result = _results[src]
-            try:
-                _result = json.loads(_result)
-            except (ValueError, TypeError):
-                _progress.clearline()
-                logger_cli.error(
-                    "# ERROR: Unexpected salt return for '{}': '{}'\n".format(
-                        src,
-                        _result
-                    )
-                )
-                self.mapper.errors.add_error(
-                    self.mapper.errors.NET_NODE_NON_RESPONSIVE,
-                    node=src,
-                    response=_result
-                )
-                continue
-            # Handle return codes
-            for tgt_node, _tgt_ips in _result.items():
-                for _params in _tgt_ips:
-                    _body = "{}({}) --{}--> {}({}@{})\n".format(
-                            src,
-                            src_data["if_name"],
-                            _params["returncode"],
-                            tgt_node,
-                            _params["if_name"],
-                            _params["ip"]
-                        )
-                    _stdout = ""
-                    _stderr = ""
-                    if len(_params["stdout"]) > 0:
-                        _stdout = "stdout:\n{}\n".format(_params["stdout"])
-                    if len(_params["stderr"]) > 0:
-                        _stderr = "stderr:\n{}\n".format(_params["stderr"])
-
-                    if not _params["returncode"]:
-                        # 0
-                        self.mapper.errors.add_error(
-                            self.mapper.errors.NET_PING_SUCCESS,
-                            ping_path=_body,
-                            stdout=_stdout,
-                            stderr=_stderr
-                        )
-                    elif _params["returncode"] == 68:
-                        # 68 is a 'can't resove host error'
-                        self.mapper.errors.add_error(
-                            self.mapper.errors.NET_PING_NOT_RESOLVED,
-                            ping_path=_body,
-                            stdout=_stdout,
-                            stderr=_stderr
-                        )
-                    elif _params["returncode"] > 1:
-                        # >1 is when no actial (any) response
-                        self.mapper.errors.add_error(
-                            self.mapper.errors.NET_PING_ERROR,
-                            ping_path=_body,
-                            stdout=_stdout,
-                            stderr=_stderr
-                        )
-                    else:
-                        # 1 is for timeouts amd/or packet lost
-                        self.mapper.errors.add_error(
-                            self.mapper.errors.NET_PING_TIMEOUT,
-                            ping_path=_body,
-                            stdout=_stdout,
-                            stderr=_stderr
-                        )
-
             # Parse results back in place
-            src_data["targets"] = _result
+            _ret_code, _data = self._process_result(
+                src, src_data,
+                _results[src]
+            )
+            if not _ret_code:
+                _progress.clearline()
+                logger_cli.error(_data)
+                continue
+            else:
+                src_data["targets"] = _data
 
         _progress.end()
 
-        return 0
+        return _packets
 
-    def print_summary(self):
-        logger_cli.info(self.mapper.errors.get_summary(print_zeros=False))
 
-    def print_details(self):
-        # Detailed errors
-        logger_cli.info(
-            "\n{}\n".format(
-                self.mapper.errors.get_errors()
-            )
+class KubeNetworkPinger(NetworkPinger):
+    def __init__(
+        self,
+        mapper,
+        detailed=False,
+        skip_list=None,
+        skip_list_file=None
+    ):
+        super(KubeNetworkPinger, self).__init__(
+            mapper,
+            detailed=detailed,
+            skip_list=skip_list,
+            skip_list_file=skip_list_file
         )
+
+    def ping_nodes(self, network_cidr_str, mtu):
+        #  get packets
+        _count, _packets, _nodes_total = self._get_packets_data(
+            network_cidr_str,
+            mtu
+        )
+        if not _count:
+            return None
+
+        # do ping of packets
+        logger_cli.info(
+            "-> Pinging nodes in {}".format(network_cidr_str))
+        _progress = Progress(_count)
+        _progress_index = 0
+        _node_index = 0
+        for src, src_data in _packets.items():
+            _targets = src_data["targets"]
+            _node_index += 1
+            # create 'targets.json' on source host
+            _ds = self.mapper.get_daemonset()
+            _pname = self.mapper.master.get_pod_name_in_daemonset_by_node(
+                src,
+                _ds
+            )
+            _path = self.mapper.master.prepare_json_in_pod(
+                _pname,
+                _ds.metadata.namespace,
+                _targets,
+                "targets.json"
+            )
+            # execute ping.py
+            _result = self.mapper.master.exec_on_target_pod(
+                _pname,
+                "ping.py",
+                args=[_path]
+            )
+            _progress_index += len(_targets)
+            # print progress
+            _progress.write_progress(
+                _progress_index,
+                note='/ {}/{} nodes / current {}'.format(
+                    _node_index,
+                    _nodes_total,
+                    src
+                )
+            )
+            # Parse results back in place
+            _ret_code, _data = self._process_result(
+                src, src_data,
+                _result
+            )
+            if not _ret_code:
+                _progress.clearline()
+                logger_cli.error(_data)
+                continue
+            else:
+                src_data["targets"] = _data
+
+        _progress.end()
+
+        return _packets
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index cf2bdf7..d87d829 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -953,7 +953,7 @@
         _sh.kill()
         return _target_path
 
-    def prepare_daemonset(self, template_filename, config_map=None):
+    def prepare_daemonset(self, template_filename):
         # load template
         _yaml_file = os.path.join(pkg_dir, 'templates', template_filename)
         logger_cli.debug("... loading template '{}'".format(_yaml_file))
@@ -1043,6 +1043,27 @@
         logger_cli.error("Timed out waiting for Daemonset to be ready")
         return False
 
+    def exec_on_target_pod(self, pod_name, script_filename, args=None):
+        """
+        Run script from configmap on target pod assuming it is present
+        """
+        # args must be a list so it can be concatenated with the command
+        _arguments = args if args else []
+        _cmd = [
+            "python3",
+            os.path.join(
+                "/",
+                self.env_config.kube_scripts_folder,
+                script_filename
+            )
+        ] + _arguments
+        _result = self.kube.exec_on_target_pod(
+            _cmd,
+            pod_name,
+            self._namespace,
+            strict=True
+        )
+        return _result
+
     def execute_script_on_daemon_set(self, ds, script_filename, args=None):
         """
         Query daemonset for pods and execute script on all of them
@@ -1060,12 +1081,7 @@
                 )
             ]
 
-        # get all pod names
-        logger_cli.debug("... extracting pod names from daemonset")
-        _pods = self.kube.CoreV1.list_namespaced_pod(
-            namespace=ds.metadata.namespace,
-            label_selector='name={}'.format(ds.metadata.name)
-        )
+        _pods = self.kube.get_pods_for_daemonset(ds)
         # Create map for threads: [[node_name, ns, pod_name, cmd]...]
         logger_cli.debug(
             "... runnning script on {} pods using {} threads at a time".format(
@@ -1137,3 +1153,40 @@
             ))
             _r = None
         return _r
+
+    def get_pod_name_in_daemonset_by_node(self, nodename, daemonset):
+        _podname = None
+        _pods = self.kube.get_pods_for_daemonset(daemonset)
+        for item in _pods.items:
+            if item.spec.node_name == nodename:
+                _podname = item.metadata.name
+
+        return _podname
+
+    def prepare_json_in_pod(self, podname, namespace, targets, filename):
+        # Iterate pods in daemonset and prepare json file on each one
+        _target_path = os.path.join(
+            "/",
+            "tmp",
+            filename
+        )
+        # checking the folder is probably not needed, as the daemonset
+        # links the configmap there on creation
+        # _folder = os.path.join(
+        #     self.env_config.kube_node_homepath,
+        #     self.env_config.kube_scripts_folder
+        # )
+        # prepare data
+        buffer = json.dumps(targets, indent=2).encode('utf-8')
+
+        # write data to pod using fancy websocket function
+        self.kube.put_string_buffer_to_pod_as_textfile(
+            podname,
+            namespace,
+            buffer,
+            _target_path
+        )
+
+        # TODO: Exception handling
+
+        return _target_path
diff --git a/scripts/ping.py b/scripts/ping.py
index fae4f65..c283e90 100644
--- a/scripts/ping.py
+++ b/scripts/ping.py
@@ -140,10 +140,15 @@
                 _packets[_node] = _t
         _threaded_out = pool.map(do_ping, _param_map)
         for _out in _threaded_out:
+            if sys.version_info[0] > 2:
+                # in python3 stdout will be a bytes object
+                _out['stdout'] = _out['stdout'].decode('utf-8')
+                _out['stderr'] = _out['stderr'].decode('utf-8')
             if isinstance(_packets[_out["tgt_host"]], dict):
                 _packets[_out["tgt_host"]] = _out
             elif isinstance(_packets[_out["tgt_host"]], list):
                 _packets[_out["tgt_host"]][_out["ip_index"]] = _out
+        # dump
         sys.stdout.write(json.dumps(_packets))
     else:
         # IP given
diff --git a/setup.py b/setup.py
index 9c9fbf1..c22fc61 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,7 @@
     author_email="osavatieiev@mirantis.com",
     classifiers=[
         "Programming Language :: Python",
-        "Programming Language :: Python :: 2.7"
+        "Programming Language :: Python :: 3.6"
     ],
     keywords="QA, openstack, salt, config, reclass",
     entry_points=entry_points,