Log collector module

New:
  - [Done] multiple namespace selector
  - [Done] keyword-based pod selector
  - [Done] per-pod log syntax detection and parsing
  - [Deferred] in-place filtering for shorter logs
  - [Done] individual log timestamp detection
  - [Done] Unix-time-based timestamp sorting
  - [Done] single-file log output using a common format
  - [Done] add all log types from all MOS namespaces and pods

Update:
  - resource preparation can be skipped per module
  - log collection now uses multiple threads (see the flow sketch below)
  - new LOG_COLLECT_THREADS setting (default: 15)

Fixes:
  - Network MTU option parsing fix
  - Faster cmd execution on single pod
  - Ceph benchmark validations
  - Ceph benchmark report sorting
  - Daemonset deployment accounts for skipped nodes
  - Network tree debugging script
  - Tree depth limiter, i.e. stack overflow prevention

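A minimal sketch of the new collection flow (it mirrors do_collect() added in
cfg_checker/modules/logs/__init__.py below; the namespaces, pod masks and the
output filename are example values only):

    # `config` is the settings object built by the CLI entrypoint
    from cfg_checker.modules.logs import sage

    config.prepare_qa_resources = False   # skip namespace/configmap preparation
    config.dumppath = "null"              # do not dump unparsed lines
    config.tail_lines = 50
    ml = sage.KubeMosLogger(config)
    plist = ml.prepare_pods(
        ["openstack", "stacklight"],      # --ns, can be given multiple times
        ["mariadb", "alerta"],            # --pod-mask, can be given multiple times
        inclusive_filter=True,            # --pods-inclusive
        exclude_kw=["cleaner"]
    )
    ml.collect_logs(plist)    # pooled threads, count from LOG_COLLECT_THREADS (15)
    ml.parse_logs()           # per-line regex and timestamp detection
    ml.merge_logs()           # merge entries keyed by Unix timestamp
    ml.save_logs("mos_logs.log")
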
  Related-PROD: PROD-36845

Change-Id: Icf229ac62078c6418ab4dbdff12b0d27ed42af1d
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..5fe46b0
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,5 @@
+[flake8]
+per-file-ignores =
+    cfg_checker/modules/logs/sage.py:E501,
+    cfg_checker/modules/logs/sage.py:W605
+exclude = .tox,__pycache__,build
diff --git a/README.md b/README.md
index de53574..07a22c0 100644
--- a/README.md
+++ b/README.md
@@ -32,10 +32,18 @@
 - you can check your local.env file for parameters (no need to update for running on cfg node)
 
 # Running
-- Packages report (HTML): `mcp_check packages report --html __packages_html_filename.html__`
-- Packages report (CSV): `mcp_check packages report --csv __packages_csv_filename.csv__`
-- Network check (CLI output): `mcp-checker network check`
-- [Work in progress] Network check (HTML report): `mcp-checker network report --html __network_html_filename.html__`
+        "mos-checker = cfg_checker.cfg_check:config_check_entrypoint",
+        "chk-pkg = cfg_checker.cli.packages:entrypoint",
+        "chk-net = cfg_checker.cli.network:entrypoint",
+        "chk-reclass = cfg_checker.cli.reclass:entrypoint",
+        "checker-agent = cfg_checker.agent.cfg_agent:entrypoint"
+
+- Packages report (HTML): `mos-checker packages report --html __packages_html_filename.html__`
+- Packages report (CSV): `mos-checker packages report --csv __packages_csv_filename.csv__`
+- Network check (CLI output): `mos-checker network check`
+- Network check (HTML report): `mos-checker network report --html __network_html_filename.html__`
+- Ceph Info (HTML report): `mos-checker ceph info --tgz __ceph_info_filename.tgz__`
+- Ceph Benchmark (HTML report) `mos-checker ceph bench --html __ceph_bench_filename.html__`
 - List folders that can be used for Reclass Compare: `mcp-checker reclass list -p __models_path__`
 - Compare two Reclass models (file and parameter wise): `mcp-checker reclass diff --model1 __model1_path__ --model2 __model2_path__ --html __reclass_html_filename.html__`
 
@@ -48,30 +56,4 @@
  - if node listing fails, execute `salt-key` on master 
    and create an `etc/nodes.list` file with minions list
 
-# Version history
-- [*Done*] Update messages to same style
-- [*Done*] Release versions support with customizable updates
-- [*Done*] Upgrades, errors and downgrades detection
-- [*Done*] Proper Debian package [version](https://www.debian.org/doc/debian-policy/ch-controlfields.html#version) naming convention support
-- [*Done*] Refactor parameter handling to have same naming for same options in different sections
-- [*Done*] Remove unneeded data from report
-- [*Done*] Add progress bar when processing along with handy stats
-- [*Done*] Add possibility to check cloud remotely
-- [*Done*] Add possibility to have several ready to use cloud connection configurations
-- [*Done*] Use flexible plugin structure to execute modules
-- [*Done*] Prepare script to create venv
-- [*Done*] Reformat reclass compare file
-- [*Done*] Finalize Network check HTML report
-- [*Done*] Do simple rule-based checks on network
-- [*Done*] Add node params to output
-
-# TODO
-- Implement simple packet sniff mechanics to check node-node VCP trafic flow
-- Format reclass compare file
-
-
-# Kube links
-  https://github.com/kubernetes-client/python/blob/master/examples/api_discovery.py
-  https://github.com/kubernetes-client/python/blob/master/examples/remote_cluster.py
-
 Cheers!
diff --git a/cfg_checker/common/decorators.py b/cfg_checker/common/decorators.py
index 1b39460..d507ae5 100644
--- a/cfg_checker/common/decorators.py
+++ b/cfg_checker/common/decorators.py
@@ -4,6 +4,7 @@
 import time
 
 from cfg_checker.common import logger, logger_cli
+from cfg_checker.common.exception import KubeException
 
 
 def retry(exceptions, total_tries=5, initial_wait=1, backoff_factor=2):
@@ -29,7 +30,8 @@
                     print_args = args if args else "no args"
                     if _tries == 1:
                         msg = "... {} failed after {} tries".format(
-                                f.__name___,
+                                # dirty hack to get name
+                                str(f).split(" ")[1],
                                 total_tries
                             )
                         logger_cli.info(msg)
@@ -39,10 +41,11 @@
                                 kwargs
                             )
                         )
-                        raise
+                        _tries = 0
+                        raise KubeException(msg)
                     msg = "... {}; Exception: {}.\n" \
                           "... retrying in {} seconds!".format(
-                              f.__name__,
+                              str(f).split(" ")[1],
                               e,
                               _delay
                           )
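
A short usage sketch for the updated retry decorator (the decorated function
below is hypothetical; in this change the real consumer is get_pod_logs() in
kube_utils.py):

    from kubernetes.client.rest import ApiException

    from cfg_checker.common.decorators import retry

    @retry(ApiException, total_tries=5, initial_wait=2, backoff_factor=2)
    def fetch_pod_log(api, name, ns):
        # transient ApiExceptions are retried with exponential backoff;
        # once the tries are exhausted the decorator raises KubeException
        return api.read_namespaced_pod_log(name=name, namespace=ns)
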
diff --git a/cfg_checker/common/kube_utils.py b/cfg_checker/common/kube_utils.py
index f4c38ef..e6b9922 100644
--- a/cfg_checker/common/kube_utils.py
+++ b/cfg_checker/common/kube_utils.py
@@ -11,6 +11,7 @@
 from kubernetes import client as kclient, config as kconfig, watch
 from kubernetes.stream import stream
 from kubernetes.client.rest import ApiException
+from urllib3.exceptions import MaxRetryError
 from time import time, sleep
 
 from cfg_checker.common import logger, logger_cli
@@ -965,7 +966,11 @@
                 _svc
             )
 
-    def get_pod_logs(self, podname, ns):
+    def list_namespaces(self):
+        return self.CoreV1.list_namespace()
+
+    @retry(ApiException, initial_wait=2)
+    def get_pod_logs(self, podname, container, ns, tail_lines=50):
         # Params
         # read log of the specified Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
 
@@ -1022,10 +1027,23 @@
         #        If the method is called asynchronously, returns the request
         #        thread.
 
-        return self.CoreV1.read_namespaced_pod_log(
-            podname,
-            ns,
-            # timestamps=True,
-            tail_lines=50,
-            # pretty=True
-        )
+        try:
+            return self.CoreV1.read_namespaced_pod_log(
+                name=podname,
+                namespace=ns,
+                container=container,
+                timestamps=True,
+                tail_lines=tail_lines,
+                # pretty=True,
+                _request_timeout=(1, 5)
+            )
+        except MaxRetryError as e:
+            logger_cli.warning(
+                "WARNING: Failed to retrieve log {}/{}:{}:\n{}".format(
+                    ns,
+                    podname,
+                    container,
+                    e.reason
+                )
+            )
+            return ""
diff --git a/cfg_checker/common/other.py b/cfg_checker/common/other.py
index 4c3ef04..987168f 100644
--- a/cfg_checker/common/other.py
+++ b/cfg_checker/common/other.py
@@ -193,5 +193,18 @@
             return getattr(obj, attr, *args)
         return functools.reduce(_getattr, [obj] + attr.split('.'))
 
+    @staticmethod
+    def split_option_type(size):
+        # I know, but it is faster than regex
+        _numbers = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57]
+        _s_int = "0"
+        _s_type = ""
+        for ch in size:
+            if ord(ch) in _numbers:
+                _s_int += ch
+            else:
+                _s_type += ch
+        return int(_s_int), _s_type
+
 
 utils = Utils()
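
The helper moved here from bench.py keeps its behaviour: digit characters go
into the number, everything else into the unit. A quick illustration (the
values are examples):

    from cfg_checker.common.other import utils

    assert utils.split_option_type("4G") == (4, "G")
    assert utils.split_option_type("30s") == (30, "s")
    assert utils.split_option_type("2m") == (2, "m")
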
diff --git a/cfg_checker/common/settings.py b/cfg_checker/common/settings.py
index 08d4163..27482ff 100644
--- a/cfg_checker/common/settings.py
+++ b/cfg_checker/common/settings.py
@@ -209,6 +209,7 @@
         self.mcp_host = _get_env_value('MCP_ENV_HOST', None)
         self.salt_port = _get_env_value('MCP_SALT_PORT', '6969')
         self.threads = int(_get_env_value('MCP_THREADS', "5"))
+        self.sage_threads = int(_get_env_value('LOG_COLLECT_THREADS', "15"))
         self.script_execution_timeout = int(
             _get_env_value('MCP_SCRIPT_RUN_TIMEOUT', "300")
         )
@@ -489,3 +490,5 @@
         # Init vars that is specific to detected envs only
         logger_cli.debug("... loading detected environment type vars")
         self._init_env_values()
+        # Set internal resource preparation flag default
+        self.prepare_qa_resources = True
diff --git a/cfg_checker/modules/ceph/__init__.py b/cfg_checker/modules/ceph/__init__.py
index 5c9357b..31c6b7a 100644
--- a/cfg_checker/modules/ceph/__init__.py
+++ b/cfg_checker/modules/ceph/__init__.py
@@ -1,9 +1,13 @@
 #    Author: Alex Savatieiev (osavatieiev@mirantis.com; a.savex@gmail.com)
 #    Copyright 2019-2022 Mirantis, Inc.
+import os
+
 from cfg_checker.agent.fio_runner import get_fio_options
-from cfg_checker.agent.fio_runner import seq_modes, mix_modes
+from cfg_checker.agent.fio_runner import seq_modes, mix_modes, rand_modes
 from cfg_checker.common import logger_cli
+from cfg_checker.common.other import utils
 from cfg_checker.common.settings import ENV_TYPE_KUBE
+from cfg_checker.common.exception import CheckerException
 from cfg_checker.helpers import args_utils
 from cfg_checker.modules.ceph import info, bench
 
@@ -33,6 +37,32 @@
 #     else:
 #         return _class
 
+
+def _validate_option_type(value, type_list):
+    _s, _t = utils.split_option_type(value)
+    if _t not in type_list:
+        raise CheckerException(
+            "Invalid option type '{}'. Expected types: {}".format(
+                value,
+                ", ".join(type_list)
+            )
+        )
+    else:
+        return
+
+
+def _validate_option(value, type_list):
+    if value not in type_list:
+        raise CheckerException(
+            "Invalid option '{}'. Expected one of: {}".format(
+                value,
+                ", ".join(type_list)
+            )
+        )
+    else:
+        return
+
+
 def _get_param_and_log(arg, param_str):
     _value = args_utils.get_arg(arg, param_str)
     logger_cli.info("    {}={}".format(param_str, _value))
@@ -242,6 +272,10 @@
     # dump results options
     _dump_path = args_utils.get_arg(args, "dump_path")
     if _dump_path:
+        if not os.path.exists(_dump_path):
+            raise CheckerException(
+                "ERROR: Dump path invalid: '{}'".format(_dump_path)
+            )
         logger_cli.info("# Results will be dumped to '{}'".format(_dump_path))
         config.bench_results_dump_path = _dump_path
     else:
@@ -345,6 +379,38 @@
     # init the Bench class
     ceph_bench = bench.KubeCephBench(config)
     ceph_bench.set_ceph_info_class(ceph_info)
+
+    # Validate options
+    logger_cli.info("-> Validating options")
+    # size
+    _validate_option_type(_opts["size"], ["G", "M"])
+    _validate_option_type(_opts["ramp_time"], ["s", "m"])
+    _validate_option_type(_opts["runtime"], ["s", "m"])
+    _modes = seq_modes + mix_modes + rand_modes
+    _validate_option(_opts["readwrite"], _modes)
+
+    if _task_file:
+        _s, _ = utils.split_option_type(_opts["size"])
+        for idx in range(len(ceph_bench.tasks)):
+            # size
+            _ts, _ = utils.split_option_type(ceph_bench.tasks[idx]["size"])
+            if _s < _ts:
+                logger_cli.error(
+                    "ERROR: Task #{} file size is to big:"
+                    " {} (volume) < {} (testfile)".format(
+                        idx,
+                        _opts["size"],
+                        ceph_bench.tasks[idx]["size"]
+                    )
+                )
+            # other
+            _validate_option(ceph_bench.tasks[idx]["readwrite"], _modes)
+        # Print defaults
+        logger_cli.debug("... default/selected options for fio:")
+        for _k in _opts.keys():
+            # TODO: Update options for single run
+            logger_cli.debug("    {} = {}".format(_k, _opts[_k]))
+
     # Preload previous results for this name
     ceph_bench.preload_results()
     # Do the testrun
diff --git a/cfg_checker/modules/ceph/bench.py b/cfg_checker/modules/ceph/bench.py
index f5af704..b79007c 100644
--- a/cfg_checker/modules/ceph/bench.py
+++ b/cfg_checker/modules/ceph/bench.py
@@ -10,6 +10,7 @@
 from cfg_checker.common import logger_cli
 from cfg_checker.common.decorators import retry
 from cfg_checker.common.file_utils import write_str_to_file
+from cfg_checker.common.other import utils
 from cfg_checker.helpers.console_utils import Progress
 from cfg_checker.helpers.console_utils import cl_typewriter
 from cfg_checker.reports import reporter
@@ -45,19 +46,6 @@
     return {}
 
 
-def _split_vol_size(size):
-    # I know, but it is faster then regex
-    _numbers = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57]
-    _s_int = "0"
-    _s_type = ""
-    for ch in size:
-        if ord(ch) in _numbers:
-            _s_int += ch
-        else:
-            _s_type += ch
-    return int(_s_int), _s_type
-
-
 class CephBench(object):
     _agent_template = "cfgagent-template.yaml"
 
@@ -165,7 +153,7 @@
         logger_cli.info("# Preparing {} agents".format(self.agent_count))
         # Increase volume size a bit, so datafile fits
         _quanitizer = 1.3
-        _v_size, _vol_size_units = _split_vol_size(options['size'])
+        _v_size, _vol_size_units = utils.split_option_type(options['size'])
         _v_size = round(_v_size * _quanitizer)
         _vol_size = str(_v_size) + _vol_size_units + "i"
         logger_cli.info(
diff --git a/cfg_checker/modules/ceph/info.py b/cfg_checker/modules/ceph/info.py
index 2c62018..db3dd75 100644
--- a/cfg_checker/modules/ceph/info.py
+++ b/cfg_checker/modules/ceph/info.py
@@ -313,49 +313,58 @@
         self._safe_tools_cmd("rm -f " + _tar_path)
         return _json
 
-    def _safe_get_cmd_output_as_json(self, cmd, zipped=False):
-        if zipped:
-            _buf = self._safe_tools_cmd_zipped_output(cmd)
-        else:
-            _buf = self._safe_tools_cmd(cmd)
+    @staticmethod
+    def _as_json(buf):
         try:
-            return json.loads(_buf)
+            return json.loads(buf)
         except ValueError as e:
             _out = ""
-            if len(_buf) > 512:
-                _out = _buf[:512]
+            if len(buf) > 512:
+                _out = buf[:512]
                 _out += "..."
             else:
-                _out = _buf
+                _out = buf
             logger_cli.error(
                 "\nERROR: failed to parse json: '{}'. Data: '{}'".format(
                     e,
                     _out
                 )
             )
-            return _buf
+            return buf
+
+    def _safe_get_cmd_output_as_json(self, cmd, zipped=False):
+        if zipped:
+            _buf = self._safe_tools_cmd_zipped_output(cmd)
+        else:
+            _buf = self._safe_tools_cmd(cmd)
+        return self._as_json(_buf)
 
     def _get_tools_pod_name(self):
         # get ceph pod
-        _names = self.master.kube.get_pod_names_by_partial_name(
+        _pods = self.master.kube.get_pods_by_partial_name(
             self.ceph_app_label,
             self.ceph_ns
         )
-        if not _names:
+        # _names = self.master.kube.get_pod_names_by_partial_name(
+        #     self.ceph_app_label,
+        #     self.ceph_ns
+        # )
+        if not _pods:
             raise KubeException(
                 "Failed to find pod using '{}'".format(self.ceph_app_label)
             )
-        elif len(_names) > 1:
+        elif len(_pods) > 1:
             logger_cli.warning(
                 "WARNING: Environment has more than one pod "
                 "with '{}' app: {}".format(
                     self.ceph_app_label,
-                    ", ".join(_names)
+                    ", ".join([p.metadata.name for p in _pods])
                 )
             )
         else:
-            logger_cli.debug("... found '{}'".format(_names[0]))
-        return _names[0]
+            logger_cli.debug("... found '{}'".format(_pods[0].metadata.name))
+        self.ceph_pod = _pods[0]
+        return _pods[0].metadata.name
 
     def _add_ceph_info_item(self, key, title, data, filename=None):
         # handle data
@@ -572,8 +581,7 @@
         _health_metrics = {}
         _devices = _c("ceph device ls")
         _devices = _devices.splitlines()
-        _progress = Progress(len(_devices)-1)
-        _index = 1
+        cmd_list = []
         for device in _devices:
             _t = device.split()
             _dev = _t[0]
@@ -582,14 +590,31 @@
 
             if _dev == "DEVICE":
                 continue
-            _metric = _cj("ceph device get-health-metrics {}".format(_dev))
+            # _metric = _cj("ceph device get-health-metrics {}".format(_dev))
+            _cmd = "ceph device get-health-metrics {}".format(_dev)
+            cmd_list.append(_cmd)
             _dev_name = "{}_{}".format(_osd, _dev)
-            _health_metrics[_dev_name] = _metric
+            _health_metrics[_dev_name] = {}
             _health_metrics[_dev_name]['node_name'] = _node
             _health_metrics[_dev_name]['osd_name'] = _osd
-            _progress.write_progress(_index, note=_dev_name)
-            _index += 1
-        _progress.end()
+            _health_metrics[_dev_name]['cmd'] = _cmd
+
+        results = self.master.exec_cmds_on_pod(
+            self.ceph_pod,
+            cmd_list
+        )
+
+        logger_cli.info("-> Processing results")
+        for _r in results:
+            _cmd = _r[3]
+            _j = self._as_json(_r[2])
+            for _dev_name in _health_metrics.keys():
+                if "cmd" in _health_metrics[_dev_name] and \
+                  _health_metrics[_dev_name]["cmd"] == _cmd:
+                    _health_metrics[_dev_name].update(_j)
+                    _health_metrics[_dev_name].pop("cmd")
+                    break
+
         self._add_ceph_info_item(
             "ceph_health",
             "Ceph Health Metrics",
@@ -633,21 +658,29 @@
         logger_cli.info(
             "-> Gathering OSD configuration ({})".format(_total_osd)
         )
-        # Shortcuts
-        # _c = self._safe_tools_cmd
-        _cj = self._safe_get_cmd_output_as_json
-        _progress = Progress(_total_osd)
-        _idx = 1
-        _cfgs = {}
+        cmds = {}
+        cmd_list = []
         for _osd in self.ceph_info["ceph_osd_df"]["data"]["nodes"]:
-            _progress.write_progress(_idx, note=_osd["name"])
-            _cfgs[_osd["name"]] = _cj(
-                "ceph config show-with-defaults -f json {}".format(
-                    _osd["name"]
-                )
+            _cmd = "ceph config show-with-defaults -f json {}".format(
+                _osd["name"]
             )
-            _idx += 1
-        _progress.end()
+            cmd_list.append(_cmd)
+            cmds[_osd["name"]] = _cmd
+
+        results = self.master.exec_cmds_on_pod(
+            self.ceph_pod,
+            cmd_list
+        )
+
+        logger_cli.info("-> Processing results")
+        _cfgs = {}
+        for _r in results:
+            _cmd = _r[3]
+            _j = self._as_json(_r[2])
+            for _osd_name in cmds.keys():
+                if cmds[_osd_name] == _cmd:
+                    _cfgs[_osd_name] = _j
+                    break
 
         # Process configs
         _base = {}
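
Both collectors above now batch their ceph commands through exec_cmds_on_pod()
and map the outputs back by the command string. A condensed, self-contained
restatement of that matching step (a dict lookup stands in for the nested loop
used in the patch; the device, pod and output values are made up):

    # each result row is [node_name, pod_name, output, cmd] (see nodes.py below)
    results = [
        ["node-1", "rook-ceph-tools-xyz", '{"wear": 3}',
         "ceph device get-health-metrics DEV_A"],
    ]
    health_metrics = {
        "osd.0_DEV_A": {
            "node_name": "node-1",
            "osd_name": "osd.0",
            "cmd": "ceph device get-health-metrics DEV_A",
        },
    }
    by_cmd = {r[3]: r[2] for r in results}
    for _name, _item in health_metrics.items():
        _cmd = _item.pop("cmd", None)
        if _cmd in by_cmd:
            _item.update({"data": by_cmd[_cmd]})  # real code merges _as_json(output)
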
diff --git a/cfg_checker/modules/logs/__init__.py b/cfg_checker/modules/logs/__init__.py
new file mode 100644
index 0000000..8003e54
--- /dev/null
+++ b/cfg_checker/modules/logs/__init__.py
@@ -0,0 +1,122 @@
+#    Author: Alex Savatieiev (osavatieiev@mirantis.com; a.savex@gmail.com)
+#    Copyright 2019-2022 Mirantis, Inc.
+import os
+
+from cfg_checker.common import logger_cli
+from cfg_checker.common.settings import ENV_TYPE_KUBE
+from cfg_checker.helpers import args_utils
+from cfg_checker.modules.logs import sage
+
+command_help = "Logs collecting and organizing"
+supported_envs = [ENV_TYPE_KUBE]
+
+
+def init_parser(_parser):
+    # network subparser
+    logs_subparsers = _parser.add_subparsers(dest='type')
+
+    collect_parser = logs_subparsers.add_parser(
+        'collect',
+        help="Collect logs according to filters and/or given criteria"
+    )
+
+    collect_parser.add_argument(
+        '--ns',
+        metavar='namespace',
+        action="append",
+        help="Namespace to get pods from. Can be used multiple times"
+    )
+
+    collect_parser.add_argument(
+        '--pod-mask',
+        metavar='pod_mask',
+        action="append",
+        help="Mask/Keyword to filter pods. Can be used multiple times"
+    )
+
+    collect_parser.add_argument(
+        '--pods-inclusive',
+        action="store_true", default=True,
+        help="Inclusive pod mask filtering, "
+             "i.e. OR for filters for 'True' or AND for 'False"
+    )
+
+    collect_parser.add_argument(
+        '--file',
+        metavar='logs_filename',
+        help="Filename for logs to be saved to"
+    )
+
+    collect_parser.add_argument(
+        '--exclude',
+        metavar='exclude_mask',
+        action="append",
+        help="Mask/Keyword to exclude pods from final results. "
+             "Can be used multiple times"
+    )
+
+    collect_parser.add_argument(
+        '--dump-undetected',
+        metavar="dumppath", default="null",
+        help="Give dump path to store not parser log lines separatelly. "
+             "Default: null"
+    )
+
+    collect_parser.add_argument(
+        '--tail',
+        metavar='tail', default=50,
+        help="Number of lines to capture. Default: 50"
+    )
+
+    return _parser
+
+
+def do_collect(args, config):
+    # Logs collection
+    # Gather logs from pods, parse and merge them into a single file
+    args_utils.check_supported_env(ENV_TYPE_KUBE, args, config)
+    # check output filename
+    _logsfile = "mos_logs.log" if not args.file else args.file
+    logger_cli.info("# Output file is '{}'".format(_logsfile))
+
+    # _class = _selectClass(_env)
+    config.prepare_qa_resources = False
+    # path to dump logs that are not detected by any regex
+    config.dumppath = args_utils.get_arg(args, "dump_undetected")
+    if config.dumppath != "null" and \
+       not os.path.exists(config.dumppath):
+        logger_cli.error(
+            "ERROR: Path to dump not parsable logs not found: '{}'".format(
+                config.dumppath
+            )
+        )
+        return
+    config.tail_lines = args_utils.get_arg(args, "tail")
+    ml = sage.KubeMosLogger(config)
+
+    # namespaces = ["openstack", "stacklight"]
+    # pod_masks = ["alerta", "nova-api"]
+    namespaces = args_utils.get_arg(args, "ns")
+    pod_masks = args_utils.get_arg(args, "pod_mask")
+    pods_inclusive = args_utils.get_arg(args, "pods_inclusive")
+    exclude_keywords = args_utils.get_arg(args, "exclude")
+    exclude_keywords = exclude_keywords if exclude_keywords else []
+    exclude_keywords += ["cleaner"]
+
+    # Prepare pod names list for log collection
+    _plist = ml.prepare_pods(
+        namespaces,
+        pod_masks,
+        inclusive_filter=pods_inclusive,
+        exclude_kw=exclude_keywords
+    )
+    # Collect logs
+    ml.collect_logs(_plist)
+    # Parse logs
+    ml.parse_logs()
+    # Merge them using timestamp
+    ml.merge_logs()
+    # Save resulting file
+    ml.save_logs(_logsfile)
+
+    return
diff --git a/cfg_checker/modules/logs/sage.py b/cfg_checker/modules/logs/sage.py
new file mode 100644
index 0000000..5375421
--- /dev/null
+++ b/cfg_checker/modules/logs/sage.py
@@ -0,0 +1,571 @@
+#    Author: Alex Savatieiev (osavatieiev@mirantis.com; a.savex@gmail.com)
+#    Copyright 2019-2022 Mirantis, Inc.
+import os
+import re
+
+from datetime import datetime
+from multiprocessing.dummy import Pool
+from multiprocessing import TimeoutError
+
+from cfg_checker.common import logger_cli
+# from cfg_checker.common.exception import KubeException
+
+from cfg_checker.helpers.console_utils import Progress
+from cfg_checker.nodes import KubeNodes
+
+
+_datetime_fmt = "%Y-%m-%d-%H-%M-%S.%f"
+
+# parsers = [
+#   [
+#       <split lines_parser>,
+#       <split data for a line parser>
+#   ]
+# ]
+kaas_timestamp_re = {
+        # kaas timestamp
+        # original ts comes from pandas: 2022-06-16T21:32:12.977674062Z
+        # since we do not have nanoseconds support in standard datetime,
+        # just throw away the 3 digits and 'Z'
+        "re": "(?P<kaas_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6})"
+              "(?:\d{3}Z) "
+              "(?P<message>.+?("
+              "(?=\\n\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6})|(?=\\n$))"
+              ")",
+        "date_fmt": "%Y-%m-%dT%H:%M:%S.%f"
+}
+log_item_re = [
+    {
+        # (mariadb) YYYY-MM-DD hh:mm:ss
+        # 2022-06-08 01:38:54 DEBUG mariadb-controller Sleeping for 10
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) +(?P<type>INFO|DEBUG|ERROR|FAIL|WARNING) +(?P<message>.*)",
+        "groups": 3,
+        "date_fmt": "%Y-%m-%d %H:%M:%S"
+    },
+    {
+        # (iam-proxy-alerta...) nnn.nnn.nnn.nnn:ppppp - uuid - - [YYYY/MM/DD hh:mm:ss]
+        # '172.16.35.69:35298 - 9b68130c-4c3b-4abd-bb04-7ff5329ad644 - - [2022/04/01 23:00:50] 10.233.118.232:4180 GET - "/ping" HTTP/1.1 "kube-probe/1.20+" 200 2 0.000'
+        "re": "(?P<src_ip>\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}):(?P<src_port>\d{1,5}).\-.(?P<guid>\S{8}-\S{4}-\S{4}-\S{4}-\S{12}).-.-.\[(?P<date>\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2})\] (?P<dest_ip>\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}):(?P<dest_port>\d{1,5}) (?P<message>.*)",
+        "groups": 7,
+        "date_fmt": "%Y/%m/%d %H:%M:%S"
+    },
+    {
+        # (default1) YYYY-MM-DD hh:mm:ss,nnn
+        #
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\,\d{3}) +(?P<type>INFO|DEBUG|ERROR|FAIL|WARNING) +(?P<message>.*)",
+        "groups": 3,
+        "date_fmt": "%Y-%m-%d %H:%M:%S,%f"
+    },
+    {
+        # (default1a) YYYY-MM-DD hh:mm:ss,nnn
+        # 2022-06-27 23:34:51,845 - OpenStack-Helm Mariadb - INFO - Updating grastate configmap
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\,\d{3}) \- (?P<process>.+) \- (?P<type>INFO|DEBUG|ERROR|FAIL|WARNING) \- (?P<message>.*)",
+        "groups": 4,
+        "date_fmt": "%Y-%m-%d %H:%M:%S,%f"
+    },
+    {
+        # (default2) YYYY-MM-DD hh:mm:ss.nnn
+        # 2022-05-23 04:01:06.360 7 INFO barbican.model.clean [-] Cleaning up soft deletions in the barbican database
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) +(?P<pid>\d{1,6}) +(?P<type>INFO|DEBUG|ERROR|FAIL|WARNING) +(?P<message>.*)",
+        "groups": 4,
+        "date_fmt": "%Y-%m-%d %H:%M:%S.%f"
+    },
+    {
+        # (default3) YYYY-MM-DD hh:mm:ss.nnn
+        # <date> - <type> - <message>
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}).\-.(?P<type>INFO|DEBUG|ERROR|FAIL|WARNING).\-.(?P<message>.*)",
+        "groups": 3,
+        "date_fmt": "%Y-%m-%d %H:%M:%S.%f"
+    },
+    {
+        # libvirt
+        # 2022-06-16 12:48:59.509+0000: 53235: info : libvirt version: 6.0.0, package: 0ubuntu8.15~cloud0 (Openstack Ubuntu Testing Bot <openstack-testing-bot@ubuntu.com> Mon, 22 Nov 2021 16:37:15 +0000)
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}\+\d{4}): (?P<pid>\d{1,6}):.(?P<type>info|debug|error|fail|warning)\s: (?P<message>.*)",
+        "groups": 4,
+        "date_fmt": "%Y-%m-%d %H:%M:%S.%f%z"
+    },
+    {
+        # 2022-06-28 00:00:55.400745 2022-06-28 00:00:55.400 13 INFO cinder.api.openstack.wsgi [req-07ca8c70-f33d-406f-9427-5388b1656297 9e5e4502e0c34c0eabcc5bfbc499b059 343ab637681b4520bf4f5a7b826b9803 - default default] https://cinder.ic-eu.ssl.mirantis.net/v2/343ab637681b4520bf4f5a7b826b9803/volumes/detail?metadata=%7B%27KaaS%27%3A%274131b4dc-81ab-4d84-b991-7b63f225058c%27%7D returned with HTTP 200
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}) +(?P<date_alt>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) +(?P<pid>\d{1,6}) +(?P<type>INFO|DEBUG|ERROR|FAIL|WARNING) +(?P<message>.*)",
+        "groups": 5,
+        "date_fmt": "%Y-%m-%d %H:%M:%S.%f"
+    },
+    {
+        # 'stacklight/alerta-5fc6f5dfd-jj7ml'
+        # '2022/04/01 23:12:37 [info] 26#26: *124156 client 127.0.0.1 closed keepalive connection\n'
+        "re": "(?P<date>\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) +\[(?P<type>info|debug|error|fail|warning)\] +(?P<message>.*)",
+        "groups": 3,
+        "date_fmt": "%Y/%m/%d %H:%M:%S"
+    },
+    {
+        # (nova-api-osapi-....) YYYY-MM-DD hh:mm:ss,nnnnnn
+        # '2022-04-01 23:08:11.806062 capabilities. Old policies are deprecated and silently going to be ignored'
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}) +(?P<message>.*)",
+        "groups": 2,
+        "date_fmt": "%Y-%m-%d %H:%M:%S.%f"
+    },
+    {
+        # (nova-api-metadata..) nnn.nnn.nnn.nnn - - [DD/MMM/YYYY:hh:mm:ss +nnnn]
+        # '172.16.35.67 - - [01/Apr/2022:22:23:14 +0000] "GET / HTTP/1.1" 200 98 1345 "-" "kube-probe/1.20+"'
+        "re": "(?P<src_ip>\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).+-.+-.+\[(?P<date>\d{1,2}\/\S{3}\/\d{4}:\d{2}:\d{2}:\d{2}.\+\d{4})\] +(?P<message>.*)",
+        "groups": 3,
+        "date_fmt": "%d/%b/%Y:%H:%M:%S %z"
+    },
+    {
+        # mysqld exporter
+        # time="2022-06-15T16:16:36Z" level=info msg="Starting mysqld_exporter (version=0.11.0, branch=HEAD, revision=5d7179615695a61ecc3b5bf90a2a7c76a9592cdd)" source="mysqld_exporter.go:206"
+        "re": "time\=\"(?P<date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})Z\" level\=(?P<type>info|debug|error|fail|warning|warn) msg\=\"(?P<message>.*)\" source\=\"(?P<source>.+)\"",
+        "groups": 4,
+        "date_fmt": "%Y-%m-%dT%H:%M:%S"
+    },
+    {
+        # metrics
+        # 2022-06-24 20:55:19.752754+00:00 [info] <0.716.0> Setting segment_entry_count for vhost 'barbican' with 0 queues to '2048'
+        "re": "(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6})\+00:00 \[(?P<type>info|debug|error|fail|warning|warn)\] (?P<message>.*)",
+        "groups": 3,
+        "date_fmt": "%Y-%m-%d %H:%M:%S.%f"
+    },
+    {
+        # openvswitch
+        # 2022-06-27T23:12:52Z|25993|reconnect|WARN|unix#89422: connection dropped (Connection reset by peer)
+        # 2022-06-27T21:31:11Z|08582|connmgr|INFO|br-tun<->tcp:127.0.0.1:6633: 6 flow_mods in the 3 s starting 10 s ago (6 adds)
+        "re": "(?P<date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})Z\|(?P<pid>\d{1,6})\|(?P<action>.+)\|(?P<type>INFO|DEBUG|ERROR|ERR|FAIL|WARNING|WARN)\|(?P<message>.*)",
+        "groups": 5,
+        "date_fmt": "%Y-%m-%dT%H:%M:%S"
+    }
+]
+
+
+def _re_groups_as_dict(compiled, re_match):
+    _d = {}
+    for _k in compiled.groupindex.keys():
+        _d[_k] = re_match[compiled.groupindex[_k]-1]
+    return _d
+
+
+class MosLogger(object):
+    def __init__(
+        self,
+        config
+    ):
+        self.env_config = config
+        return
+
+
+class KubeMosLogger(MosLogger):
+    def __init__(self, config):
+        self.master = KubeNodes(config)
+        super(KubeMosLogger, self).__init__(config)
+        # Init ceph tools pod
+        self.logs = {}
+        self.merged_logs = {}
+        self.kaas_ts_regex = re.compile(kaas_timestamp_re["re"])
+        self.item_regex = []
+        self.dumppath = config.dumppath
+        self.tail_lines = config.tail_lines
+        for regex in log_item_re:
+            self.item_regex.append(
+                {
+                    "compiled": re.compile(regex["re"]),
+                    "date_fmt": regex["date_fmt"],
+                    "groups": regex["groups"]
+                }
+            )
+
+    def _keywords(self, tstring, keywords):
+        return [k in tstring for k in keywords]
+
+    def _safe_parse_date(self, str_date, fmt, note="-"):
+        # try to parse the date string; return -1 if the format does not match
+        try:
+            _t = datetime.strptime(str_date, fmt)
+        except ValueError:
+            logger_cli.warning(
+                "WARNING ({}): Can't parse date '{}'"
+                " using '{}'".format(
+                    note,
+                    str_date,
+                    fmt
+                )
+            )
+            _t = -1
+        return _t
+
+    def prepare_pods(
+        self,
+        ns_list,
+        kw_list,
+        inclusive_filter=True,
+        exclude_kw=[]
+    ):
+        def _list_with_containers(pod_item):
+            _list = []
+            for c in pod_item[2]:
+                _list.append([
+                    pod_item[0],
+                    pod_item[1],
+                    c
+                ])
+            return _list
+        logger_cli.info(
+            "# Preparing pods, ns: {}; keywords: {}".format(
+                ", ".join(ns_list) if ns_list else "*",
+                ", ".join(kw_list) if kw_list else "*"
+            )
+        )
+        # [ns, pod_name]
+        _target_pods = []
+        # validate ns
+        _all_namespaces = self.master.list_namespaces()
+        for _ns in _all_namespaces:
+            if _ns in ns_list or not ns_list:
+                _tmp_pods = []
+                logger_cli.info("-> processing namespace '{}'".format(_ns))
+                # list pods using mask
+                logger_cli.debug("... getting pods list")
+                _pods = self.master.list_pod_names_with_containers(ns=_ns)
+                logger_cli.debug("... {} total pods found".format(len(_pods)))
+                for _pod in _pods:
+                    # _pod[_ns, _name]
+                    _kw = self._keywords(_pod[1], kw_list) \
+                        if kw_list else [True]
+                    if any(self._keywords(_pod[1], exclude_kw)) or \
+                       any(self._keywords(_pod[2], exclude_kw)):
+                        logger_cli.debug("... skipped '{}'".format(_pod[1]))
+                        continue
+                    elif (not inclusive_filter and all(_kw)) or \
+                         (inclusive_filter and any(_kw)):
+                        _cpods = _list_with_containers(_pod)
+                        _tmp_pods += _cpods
+                        logger_cli.debug(
+                            "... added {} items for pod '{}'".format(
+                                len(_cpods),
+                                "/".join(_pod[:2])
+                            )
+                        )
+                    else:
+                        # logger_cli.debug("... skipped pod '{}'".format(_pod))
+                        pass
+                logger_cli.info(
+                    "-> {} pods processed, "
+                    "{} log items to be collected".format(
+                        len(_pods),
+                        len(_tmp_pods)
+                    )
+                )
+                _target_pods += _tmp_pods
+
+        logger_cli.info(
+            "-> found {} log items total".format(len(_target_pods))
+        )
+        return _target_pods
+
+    def _get_pod_log(self, params):
+        ns = params[0]
+        name = params[1]
+        container_name = params[2]
+        # Get target log
+        _log_data = self.master.get_logs_for_pod(
+            name,
+            container_name,
+            ns,
+            tail_lines=self.tail_lines
+        )
+        if len(_log_data) < 10:
+            return None
+        else:
+            return _log_data
+
+    def collect_logs(self, pods_list):
+
+        # Prepare list of pods to collect
+        # cmd = """
+        # kubectl get pods -A -o=jsonpath='{range .items[*]}
+        #     {.metadata.namespace}{"/"}
+        #     {.metadata.name}{"\n"}{range .spec.containers[*]} {.name}{"\n"}
+        #     {end}'
+        # """
+
+        logger_cli.info(
+            "# Collecting logs using {} threads".format(
+                self.env_config.sage_threads
+            )
+        )
+        # Do collect using pooled threads
+        pool = Pool(self.env_config.sage_threads)
+        _params = []
+        # Prepare params for getting logs
+        for _ns, _pod_name, _cont_name in pods_list:
+            _params.append([_ns, _pod_name, _cont_name])
+        # Init progress bar
+        total_log_items = len(_params)
+        log_item_index = 0
+        _progress = Progress(total_log_items)
+        # Start pooled processing
+        results = pool.imap(self._get_pod_log, _params)
+        # Catch and parse results
+        while True:
+            try:
+                # use timeout as some of the request can hang
+                _r = results.next(timeout=10)
+                _namepath = "{}/{}:{}".format(
+                    _params[log_item_index][0],
+                    _params[log_item_index][1],
+                    _params[log_item_index][2]
+                )
+
+            except StopIteration:
+                # end of pool
+                break
+
+            except TimeoutError:
+                # report pod which hanged and ignore it
+                _progress.clearline()
+                logger_cli.warning(
+                    "WARNING: Timeout waiting for log content {}".format(
+                        _namepath
+                    )
+                )
+                continue
+            # log size for the progress note (0 when skipped or empty)
+            _size = len(_r) if _r is not None else 0
+            if _r is not None:
+                # Save detected data
+                self.logs[_namepath] = {
+                    "ns": _params[log_item_index][0],
+                    "pod_name": _params[log_item_index][1],
+                    "container_name": _params[log_item_index][2],
+                    "raw": _r
+                }
+
+            # print progress
+            _progress.write_progress(
+                log_item_index+1,
+                note="'{}': {} chars".format(_namepath, _size)
+            )
+
+            # track next param set
+            log_item_index += 1
+
+        _progress.end()
+        pool.close()
+        pool.join()
+
+        # debug
+        # with open("logs_debug.json", "w+") as wf:
+        #     wf.write(json.dumps(self.logs))
+        # debug
+        return
+
+    def _parse_kaas_timestamps(self):
+        # shortcut function to get array aligned index
+        def _get_group(match, key):
+            return match[self.kaas_ts_regex.groupindex[key]-1]
+
+        logger_cli.info("-> Parsing kaas timestamps")
+        # iterate logs
+        _counter = 0
+        _pbar = Progress(len(self.logs))
+        for _namepath, _item in self.logs.items():
+            # next progress bar item
+            _counter += 1
+            _pbar.write_progress(_counter, note=_namepath)
+            # Get all lines from log matched
+            _matches = self.kaas_ts_regex.findall(_item.pop("raw"))
+            # iterate matches and parse timestamp
+            _log = []
+            for _match in _matches:
+                # new log item
+                _log_line = _re_groups_as_dict(self.kaas_ts_regex, _match)
+                # parse ts from kaas
+                _pts = self._safe_parse_date(
+                    _log_line["kaas_date"],
+                    kaas_timestamp_re["date_fmt"],
+                    note=_namepath
+                )
+                _log_line["kaas_ts"] = _pts
+
+                # save log item
+                _log.append(_log_line)
+
+            # save pmessage and kaas_ts
+            _item["total_lines"] = len(_matches)
+            _item["log"] = _log
+
+        _pbar.end()
+        return
+
+    @staticmethod
+    def _get_matched(regex, line):
+        # Check if regex has matching groups number in last line
+        _c = regex["compiled"]
+        _m = _c.findall(line)
+        # calculate groups if there is a match
+        _group_count = len(_m[0]) if len(_m) > 0 else 0
+        # Check than group count at least 2
+        # and check that matched number of groups found
+        if _group_count > 1 and _group_count == len(_c.groupindex):
+            return _m
+        else:
+            return []
+
+    def _parse_log_item(self, log_item):
+        def _detect_re():
+            _re = None
+            for regex in self.item_regex:
+                _m = self._get_matched(regex, _message)
+                if _m:
+                    _re = regex
+                    break
+            return _re, _m
+
+        # parse whole log using detected pattern
+        l_parsed = 0
+        l_not_parsed = 0
+        l_skipped = 0
+        _index = 0
+        _li = log_item["log"]
+        _re = None
+        while _index < log_item["total_lines"]:
+            # pop message as there might be similar group name present
+            _message = _li[_index].pop("message")
+            # Parse line
+            _m = []
+            # Try last regex for this item
+            if _re is not None:
+                _m = self._get_matched(_re, _message)
+                # if not matched
+            if not _m:
+                # Try every regex to match line format
+                # by counting groups detected
+                _re, _m = _detect_re()
+            if len(_m) == 1:
+                # get matches with group names as a dict
+                _new_line_items = \
+                    _re_groups_as_dict(_re["compiled"], _m[0])
+                # update original dict
+                _li[_index].update(_new_line_items)
+                # Parse date
+                _pts = self._safe_parse_date(
+                    _new_line_items["date"],
+                    _re["date_fmt"]
+                )
+                _li[_index]["ts"] = _pts
+                l_parsed += 1
+            elif len(_m) == 0:
+                # put back message that failed to parse
+                _li[_index]["message"] = _message
+                l_not_parsed += 1
+            else:
+                # Should never happen
+                logger_cli.warning(
+                    "WARNING: Skipping ambigious log message: "
+                    "'{}'".format(_message)
+                )
+                l_skipped += 1
+            # next line
+            _index += 1
+        log_item["stats"] = {
+            "parsed": l_parsed,
+            "not_parsed": l_not_parsed,
+            "skipped": l_skipped
+        }
+
+    def parse_logs(self):
+
+        # debug: load precreated logs
+        # _ll = {}
+        # with open("logs_debug.json", "r+") as rf:
+        #     _ll = json.loads(rf.read())
+        # if _ll:
+        #     self.logs = _ll
+        # debug: end
+
+        # Get kaas ts as a plan B if log time either not present or not parsed
+        self._parse_kaas_timestamps()
+
+        logger_cli.info("-> Parsing logs")
+        _p = Progress(len(self.logs))
+        idx = 1
+        totalParsed = 0
+        totalNotParsed = 0
+        # iterate raw logs and try to parse actual pod timing
+        for _namepath, _log in self.logs.items():
+            # Update progress bar
+            _p.write_progress(
+                idx,
+                note="parsed: {}, not parsed: {}, => {}".format(
+                    totalParsed,
+                    totalNotParsed,
+                    _namepath
+                )
+            )
+            # Parse log
+            self._parse_log_item(_log)
+            if self.dumppath != "null":
+                for line in _log["log"]:
+                    if "date" not in line.keys():
+                        # Log line was not parsed by any known regex
+                        _filename = os.path.join(
+                            self.dumppath,
+                            _log["pod_name"]+"-"+_log["container_name"]+".log"
+                        )
+                        with open(_filename, "a") as rawlogfile:
+                            rawlogfile.write(
+                                "<KTS>{} <M>{}\n".format(
+                                    line["kaas_date"],
+                                    line["message"]
+                                )
+                            )
+            # Stats
+            totalParsed += _log["stats"]["parsed"]
+            totalNotParsed += _log["stats"]["not_parsed"]
+            # Update progress bar
+            # _p.write_progress(
+            #     idx,
+            #     note="parsed: {}, not parsed: {}, => {}".format(
+            #         totalParsed,
+            #         totalNotParsed,
+            #         _namepath
+            #     )
+            # )
+            idx += 1
+        _p.end()
+
+    def merge_logs(self):
+        logger_cli.info("# Merging logs")
+        _merged = {}
+        for _pod, _logs in self.logs.items():
+            for _li in _logs["log"]:
+                # Prepare log entry
+                _li["namepath"] = _pod
+
+                # check if timestamp is detected
+                if "ts" not in _li:
+                    # Use kaas_ts as a timestamp
+                    _timestamp = _li.pop("kaas_ts")
+                else:
+                    # get parsed timestamp
+                    _timestamp = _li.pop("ts")
+
+                # and put undetected lines separately
+                # save items using timestamps
+                _merged[float(_timestamp.timestamp())] = _li
+        self.merged_logs = _merged
+        return
+
+    def save_logs(self, filename):
+        logger_cli.info("# Writing output file: '{}'".format(filename))
+        with open(filename, 'w+') as ff:
+            _log_iter = sorted(
+                self.merged_logs.items(), key=lambda item: item[0]
+            )
+            for k, v in _log_iter:
+                ff.write(
+                    "{} {}: {}\n".format(
+                        v.pop("namepath"),
+                        datetime.fromtimestamp(k).strftime(_datetime_fmt),
+                        " ".join(["{}={}".format(k, v) for k, v in v.items()])
+                    )
+                )
+        return
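
The kaas timestamp handling described at the top of sage.py, restated as a
small self-contained example (the log line itself is made up):

    import re
    from datetime import datetime

    # keep 6 fractional digits, drop the remaining 3 digits and the trailing 'Z'
    kaas_re = re.compile(
        r"(?P<kaas_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6})"
        r"(?:\d{3}Z) (?P<message>.+)"
    )
    line = "2022-06-16T21:32:12.977674062Z Started container mariadb"
    m = kaas_re.match(line)
    ts = datetime.strptime(m.group("kaas_date"), "%Y-%m-%dT%H:%M:%S.%f")
    print(ts.timestamp(), m.group("message"))
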
diff --git a/cfg_checker/modules/network/__init__.py b/cfg_checker/modules/network/__init__.py
index 19c0b6b..807d05f 100644
--- a/cfg_checker/modules/network/__init__.py
+++ b/cfg_checker/modules/network/__init__.py
@@ -11,6 +11,14 @@
 supported_envs = [ENV_TYPE_SALT, ENV_TYPE_KUBE]
 
 
+def mtu_type(value):
+    try:
+        _mtu = int(value)
+    except ValueError:
+        _mtu = value
+    return _mtu
+
+
 def _selectClass(_env, strClassHint="checker"):
     _class = None
     if _env == ENV_TYPE_SALT:
@@ -92,7 +100,7 @@
     )
     net_ping_parser.add_argument(
         '--mtu',
-        metavar='network_ping_mtu', default=64,
+        metavar='network_ping_mtu', default=64, type=mtu_type,
         help="MTU size to use. Ping will be done for MTU - 20 - 8. Default: 64"
     )
     net_ping_parser.add_argument(
@@ -306,6 +314,10 @@
                 "Defaulting to '9100'".format(_mtu)
             )
             _mtu = 9100
+        elif _mtu > 63 and _mtu < 9101:
+            logger_cli.info(
+                "# MTU set to '{}'.".format(_mtu)
+            )
         else:
             raise CheckerException(
                 "Negative MTU values not supported: '{}'".format(
diff --git a/cfg_checker/modules/network/mapper.py b/cfg_checker/modules/network/mapper.py
index 5c18148..3072c68 100644
--- a/cfg_checker/modules/network/mapper.py
+++ b/cfg_checker/modules/network/mapper.py
@@ -197,6 +197,8 @@
                 len(self.master.nodes[key]['networks'].keys())
             ))
         logger_cli.info("-> done collecting networks data")
+        # Save number for future recursion depth cutting
+        _total_networks = len(self.master.nodes[key]['networks'])
 
         logger_cli.info("-> mapping runtime network IPs")
         # match interfaces by IP subnets
@@ -249,6 +251,15 @@
                         net_data['ifs'].append(_if)
 
             def process_interface(lvl, interface, tree, res):
+                if abs(lvl) > _total_networks:
+                    logger_cli.warn(
+                        "WARNING: Probable cyclic dependency, "
+                        "tree path discovery was cut down to {}".format(
+                            _total_networks
+                        )
+                    )
+                    return
+
                 # get childs for each root
                 # tree row item (<if_name>, [<parents>], [<childs>])
                 if lvl > 50 or lvl < -50:
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index 2e55b63..7c09690 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -509,9 +509,15 @@
         self._configmap_name = self.env_config.kube_scripts_folder
 
         # prepare needed resources
-        self._check_namespace()
-        self._scripts = self._check_config_map()
         self.prepared_daemonsets = []
+        # Check if we need resources prepared
+        if not config.prepare_qa_resources:
+            logger_cli.debug("... skipped preparing resources")
+            self._scripts = None
+            return
+        else:
+            self._check_namespace()
+            self._scripts = self._check_config_map()
 
     def _check_namespace(self):
         # ensure namespace
@@ -1013,7 +1019,7 @@
             ds.metadata.name
         )
 
-        _total = len(self.nodes)
+        _total = len(self.nodes) - len(self.skip_list)
         # _scheduled = _ds.status.scheduled
         # _ready = _ds.status.ready
 
@@ -1082,7 +1088,8 @@
 
     def exec_cmd_on_target_pod(self, pod_name, ns, command_str):
         """
-        Run script from configmap on target pod assuming it is present
+        Run cmd on target pod
+
         """
         _result = self.kube.exec_on_target_pod(
             command_str,
@@ -1110,7 +1117,9 @@
         )
         # Update results
         _ds_results = {}
-        for _n, _, _v in _results:
+        # only node name and result is needed
+        # pod name and cmd ignored
+        for _n, _, _v, _ in _results:
             _ds_results[_n] = _v
         return _ds_results
 
@@ -1135,14 +1144,7 @@
             _pod_results[_p] = _v
         return _pod_results
 
-    def exec_cmd_on_pods(
-        self,
-        pod_list,
-        cmd,
-        _args=None,
-        is_script=False,
-        silent=False
-    ):
+    def _pooled_exec_on_pod(self, plist, silent=False):
         def _kube_exec_on_pod(plist):
             return [
                 plist[1],  # node
@@ -1154,8 +1156,47 @@
                     strict=True,
                     _request_timeout=120,
                     arguments=plist[5]
-                )
+                ),
+                # save cmd used
+                plist[4]
             ]
+        # map func and cmd
+        pool = Pool(self.env_config.threads)
+        _results = []
+        self.not_responded = []
+        # create result list
+        if not silent:
+            _progress = Progress(len(plist))
+        ret = pool.imap_unordered(_kube_exec_on_pod, plist)
+
+        for ii in enumerate(ret, start=1):
+            if not ii[1][1]:
+                self.not_responded.append(ii[1][0])
+            else:
+                _results.append(ii[1])
+            if not silent:
+                _progress.write_progress(ii[0])
+
+        if not silent:
+            _progress.end()
+        pool.close()
+        pool.join()
+        logger_cli.debug(
+            "... done, {} total outputs; {} not responded".format(
+                len(_results),
+                len(self.not_responded)
+            )
+        )
+        return _results
+
+    def exec_cmd_on_pods(
+        self,
+        pod_list,
+        cmd,
+        _args=None,
+        is_script=False,
+        silent=False
+    ):
 
         # Create map for threads: [[node_name, ns, pod_name, cmd]...]
         logger_cli.debug(
@@ -1196,34 +1237,36 @@
                 ]
             )
 
-        # map func and cmd
-        pool = Pool(self.env_config.threads)
-        _results = []
-        self.not_responded = []
-        # create result list
-        if not silent:
-            _progress = Progress(len(_plist))
-        ret = pool.imap_unordered(_kube_exec_on_pod, _plist)
+        return self._pooled_exec_on_pod(_plist, silent=silent)
 
-        for ii in enumerate(ret, start=1):
-            if not ii[1][1]:
-                self.not_responded.append(ii[1][0])
-            else:
-                _results.append(ii[1])
-            if not silent:
-                _progress.write_progress(ii[0])
-
-        if not silent:
-            _progress.end()
-        pool.close()
-        pool.join()
+    def exec_cmds_on_pod(self, pod, cmd_list):
         logger_cli.debug(
-            "... done, {} total outputs; {} not responded".format(
-                len(_results),
-                len(self.not_responded)
+            "... runnning {} cmds using {} threads at a time".format(
+                len(cmd_list),
+                self.env_config.threads
             )
         )
-        return _results
+        _plist = []
+        # decide whether the command needs to be wrapped in bash
+        for item in cmd_list:
+            if '|' in item:
+                _cmd = "bash -c"
+                _arguments = item
+            else:
+                _cmd = item
+                _arguments = ""
+            _plist.append(
+                [
+                    self,
+                    pod.spec.node_name,
+                    pod.metadata.namespace,
+                    pod.metadata.name,
+                    _cmd,
+                    _arguments
+                ]
+            )
+
+        return self._pooled_exec_on_pod(_plist)
 
     def delete_daemonset(self, ds):
         # Try to delete daemonset
@@ -1446,5 +1489,40 @@
             return None
         return [[i.metadata.namespace, i.metadata.name] for i in _items.items]
 
-    def get_logs_for_pod(self, podname, namespace):
-        return self.kube.get_pod_logs(podname, namespace)
+    def list_pod_names_with_containers(self, ns="qa-space", running_only=True):
+        _result = []
+        _pods = self.kube.list_pods(ns)
+        if not running_only:
+            for i in _pods.items:
+                _result.append([
+                    i.metadata.namespace,
+                    i.metadata.name,
+                    [c.name for c in i.spec.containers]
+                ])
+        else:
+            for i in _pods.items:
+                if i.status.phase == "Running":
+                    _result.append([
+                        i.metadata.namespace,
+                        i.metadata.name,
+                        [c.name for c in i.status.container_statuses
+                         if c.state.running is not None]
+                    ])
+        return _result
+
+    def get_logs_for_pod(self, podname, container, namespace, tail_lines):
+        try:
+            return self.kube.get_pod_logs(
+                podname,
+                container,
+                namespace,
+                tail_lines=tail_lines
+            )
+        except KubeException as e:
+            logger_cli.warning(
+                "WARNING: Log retrieval failed: '{}'".format(e.message)
+            )
+            return ""
+
+    def list_namespaces(self):
+        return [i.metadata.name for i in self.kube.list_namespaces().items]
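
The new exec_cmds_on_pod() wraps piped commands in "bash -c" before handing
them to the pod; a standalone restatement of that decision (the command
strings are examples):

    def wrap_cmd(item):
        # pipes need a shell; plain binaries are executed directly
        if "|" in item:
            return "bash -c", item
        return item, ""

    assert wrap_cmd("ceph df -f json") == ("ceph df -f json", "")
    assert wrap_cmd("ceph osd ls | wc -l") == ("bash -c", "ceph osd ls | wc -l")
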
diff --git a/debug_scripts/dnetwork.py b/debug_scripts/dnetwork.py
new file mode 100644
index 0000000..545eb94
--- /dev/null
+++ b/debug_scripts/dnetwork.py
@@ -0,0 +1,301 @@
+import json
+import ipaddress
+
+from copy import deepcopy
+from cfg_checker.modules.network.network_errors import NetworkErrors
+
+_network_item = {
+    "runtime": {},
+    "config": {},
+    "reclass": {}
+}
+errors = NetworkErrors()
+_console = []
+
+
+def cprint(_str):
+    print(_str)
+    _console.append(_str)
+
+
+# adding net data to tree
+def _add_data(_list, _n, _h, _d):
+    if _n not in _list:
+        _list[_n] = {}
+        _list[_n][_h] = [_d]
+    elif _h not in _list[_n]:
+        # there is no such host, just create it
+        _list[_n][_h] = [_d]
+    else:
+        # there is such host... this is an error
+        errors.add_error(
+            errors.NET_DUPLICATE_IF,
+            host=_h,
+            dup_if=_d['name']
+        )
+        _list[_n][_h].append(_d)
+
+
+def _map_network_for_host(host, if_class, net_list, data):
+    # filter networks for this IF IP
+    _nets = [n for n in net_list.keys() if if_class.ip in n]
+    _masks = [n.netmask for n in _nets]
+    if len(_nets) > 1:
+        # There are multiple networks found for this IP; this is an error
+        errors.add_error(
+            errors.NET_SUBNET_INTERSECT,
+            host=host,
+            ip=str(if_class.exploded),
+            networks="; ".join([str(_n) for _n in _nets])
+        )
+    # check mask match
+    if len(_nets) > 0 and if_class.netmask not in _masks:
+        errors.add_error(
+            errors.NET_MASK_MISMATCH,
+            host=host,
+            if_name=data['name'],
+            if_cidr=if_class.exploded,
+            if_mapped_networks=", ".join([str(_n) for _n in _nets])
+        )
+
+    if len(_nets) < 1:
+        _add_data(net_list, if_class.network, host, data)
+    else:
+        # add all data
+        for net in _nets:
+            _add_data(net_list, net, host, data)
+
+    return net_list
+
+
+host = "tmp-node-00000"
+node_data = {}
+interfaces = {
+    host: {}
+}
+_runtime = {}
+
+with open("ifs_data.json", "rt") as ff:
+    jj = json.loads(ff.read())
+
+node_data['routes'] = jj.pop("routes")
+node_data['networks'] = jj
+
+cprint("# {} Networks".format(len(jj)))
+
+for net_name, net_data in node_data['networks'].items():
+    # cut net name
+    _i = net_name.find('@')
+    _name = net_name if _i < 0 else net_name[:_i]
+    # get ips and calculate subnets
+    if _name in ['lo']:
+        # skip the localhost
+        continue
+    else:
+        # add collected data to interface storage
+        if _name not in interfaces[host]:
+            interfaces[host][_name] = \
+                deepcopy(_network_item)
+        interfaces[host][_name]['runtime'] = \
+            deepcopy(net_data)
+
+    # get the IPs and make sure that the widest mask goes first
+    _ip4s = sorted(
+        net_data['ipv4'],
+        key=lambda s: s[s.index('/'):]
+    )
+    for _ip_str in _ip4s:
+        # create interface class
+        _if = ipaddress.IPv4Interface(_ip_str)
+        # check if this is a VIP
+        # ...all those will have /32 mask
+        net_data['vip'] = None
+        if _if.network.prefixlen == 32:
+            net_data['vip'] = str(_if.exploded)
+        if 'name' not in net_data:
+            net_data['name'] = _name
+        if 'ifs' not in net_data:
+            net_data['ifs'] = [_if]
+            # map it
+            _runtime = _map_network_for_host(
+                host,
+                _if,
+                _runtime,
+                net_data
+            )
+        else:
+            # data is already there, just add VIP
+            net_data['ifs'].append(_if)
+
+
+def process_interface(lvl, interface, tree, res):
+    if abs(lvl) > 50:
+        cprint("### ERROR: Probable cyclic dependency, exiting")
+        return
+    cprint("{}{}:{}".format(" "*(10+lvl), lvl, interface))
+    # get children for each root
+    # tree row item (<if_name>, [<parents>], [<children>])
+    if lvl not in tree:
+        # - no level - add it
+        tree[lvl] = {}
+    # is this interface already present at this level?
+    if interface not in tree[lvl]:
+        # - IF not present
+        _n = ''
+        if interface not in res:
+            _n = 'unknown IF'
+            _p = None
+            _c = None
+        else:
+            # -- get parents, add
+            _p = res[interface]['lower']
+            # -- get children, add
+            _c = res[interface]['upper']
+
+        # if None, put empty list
+        _p = _p if _p else []
+        # if None, put empty list
+        _c = _c if _c else []
+        tree[lvl].update({
+            interface: {
+                "note": _n,
+                "parents": _p,
+                "children": _c,
+                "size": len(_p) if len(_p) > len(_c) else len(_c)
+            }
+        })
+        for p_if in tree[lvl][interface]["parents"]:
+            # -- cycle: execute process for next parent, lvl-1
+            process_interface(lvl-1, p_if, tree, res)
+        for c_if in tree[lvl][interface]["children"]:
+            # -- cycle: execute process for next child, lvl+1
+            process_interface(lvl+1, c_if, tree, res)
+    else:
+        # - IF present - exit (been here already)
+        cprint("{}{}".format(" "*(10+lvl), "--branch-end--\n"))
+        return
+
+
+def _put(cNet, cIndex, _list):
+    _added = False
+    _actual_index = -1
+    # Check list len
+    _len = len(_list)
+    if cIndex >= _len:
+        # grow list to meet index
+        _list = _list + [''] * (cIndex - _len + 1)
+        _len = len(_list)
+
+    for _cI in range(cIndex, _len):
+        # add child per index
+        # if space is free
+        if not _list[_cI]:
+            _list[_cI] = cNet
+            _added = True
+            _actual_index = _cI
+            break
+    if not _added:
+        # grow list by one entry
+        _list = _list + [cNet]
+        _actual_index = len(_list) - 1
+    return _actual_index, _list
+
+
+# build network hierarchy
+nr = node_data['networks']
+# walk interface tree
+for _ifname in node_data['networks']:
+    _tree = {}
+    _level = 0
+    cprint("# -> {}".format(_ifname))
+    process_interface(_level, _ifname, _tree, nr)
+    # save tree for node/if
+    node_data['networks'][_ifname]['tree'] = _tree
+
+    # debug, print built tree
+    cprint("# end '{}'".format(_ifname))
+    lvls = list(_tree.keys())
+    lvls.sort()
+    n = len(lvls)
+    m = max([len(_tree[k].keys()) for k in _tree.keys()])
+    matrix = [["" for i in range(m)] for j in range(n)]
+    x = 0
+    while True:
+        _lv = lvls.pop(0)
+        # get all interfaces on this level
+        # keep a plain list so the membership checks and .remove() below work
+        nets = list(_tree[_lv].keys())
+        while True:
+            y = 0
+            # get next interface; stop once this level is exhausted
+            if not nets:
+                break
+            _net = nets.pop(0)
+            # all nets
+            _a = [_net]
+            # put current interface if this is only one left
+            if not _tree[_lv][_net]['children']:
+                if _net not in matrix[x]:
+                    _, matrix[x] = _put(
+                        _net,
+                        y,
+                        matrix[x]
+                    )
+                y += 1
+            else:
+                # get all nets with same child
+                for _c in _tree[_lv][_net]['children']:
+                    for _o_net in nets:
+                        if _c in _tree[_lv][_o_net]['children']:
+                            _a.append(_o_net)
+                    # flush collected nets
+                    for idx in range(len(_a)):
+                        if _a[idx] in matrix[x]:
+                            # there is such interface on this level
+                            # get index
+                            _nI = matrix[x].index(_a[idx])
+                            _, matrix[x+1] = _put(
+                                _c,
+                                _nI,
+                                matrix[x+1]
+                            )
+                        else:
+                            # there is no such interface
+                            # add it
+                            _t, matrix[x] = _put(
+                                _a[idx],
+                                0,
+                                matrix[x]
+                            )
+                            # also, put child
+                            _, matrix[x+1] = _put(
+                                _c,
+                                _t,
+                                matrix[x+1]
+                            )
+                        # remove collected nets from processing
+                        if _a[idx] in nets:
+                            nets.remove(_a[idx])
+                    y += len(_a)
+            if not nets:
+                x += 1
+                break
+        if not lvls:
+            break
+
+    lines = []
+    _columns = [max(len(i) for i in li) for li in matrix]
+    for idx_y in range(m):
+        line = ""
+        for idx_x in range(n):
+            _len = _columns[idx_x] if _columns[idx_x] else 1
+            _fmt = "{" + ":{}".format(_len) + "} "
+            line += _fmt.format(matrix[idx_x][idx_y])
+        lines.append(line)
+    node_data['networks'][_ifname]['matrix'] = matrix
+    node_data['networks'][_ifname]['lines'] = lines
+    cprint("#### Tree")
+    cprint("\n".join(lines))
+
+with open("debug_net_console.log", "w+") as ff:
+    ff.write("\n".join(_console))
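
The debug script above expects an `ifs_data.json` dump in its working directory and writes everything it prints to `debug_net_console.log`. For a quick run outside a real node, a hand-made input only needs the fields the script actually reads: a `routes` key plus, per interface, the `ipv4`, `lower` and `upper` lists. A sketch that generates such a file (the interface names and addresses below are invented for illustration):

    import json

    # minimal fake node dump: one bond with a bridge on top of it;
    # the /32 address is picked up by the script as a VIP
    sample = {
        "routes": [],
        "bond0": {"ipv4": ["10.0.0.10/24"], "lower": None, "upper": ["br-mgmt"]},
        "br-mgmt": {
            "ipv4": ["10.0.1.10/24", "10.0.1.100/32"],
            "lower": ["bond0"],
            "upper": None
        }
    }
    with open("ifs_data.json", "wt") as ff:
        ff.write(json.dumps(sample, indent=2))
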
diff --git a/etc/example._env b/etc/example._env
index 80e4a72..d226d3b 100644
--- a/etc/example._env
+++ b/etc/example._env
@@ -40,9 +40,15 @@
 # Subfolder with scripts to be uploaded
 SALT_SCRIPTS_FOLDER=test_scripts
 
-# All nodes list file overide
+# All nodes list file override, used when the salt-key request fails
 SALT_NODE_LIST_FILE=etc/example._list
 
 ### K8s section
-# All vars start with KUBE
-KUBE_CONFIG_ROOT=/root
\ No newline at end of file
+# Kube config root used when the config is looked up on the remote host
+KUBE_CONFIG_ROOT=/root
+# Folder used to store scripts inside the pod
+KUBE_SCRIPTS_FOLDER=cfg-checker-scripts
+# Kube host's username to use when accessing it via SSH
+KUBE_NODE_USER=mcc-user
+# Key to use when accessing kube hosts remotely
+KUBE_NODE_KEYPATH=/.../<project_folder>/envs/node.key
\ No newline at end of file
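
The new KUBE_* settings are ordinary environment variables, so any helper that needs them can read them straight from the environment, assuming they have been exported (e.g. by sourcing the env file). A hedged sketch; the defaults simply mirror the example values above and the Python variable names are illustrative:

    import os

    # fall back to the values shown in etc/example._env
    kube_scripts_folder = os.environ.get("KUBE_SCRIPTS_FOLDER", "cfg-checker-scripts")
    kube_node_user = os.environ.get("KUBE_NODE_USER", "mcc-user")
    kube_node_keypath = os.environ.get("KUBE_NODE_KEYPATH", "")
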
diff --git a/templates/ceph_bench_html.j2 b/templates/ceph_bench_html.j2
index 4e04560..f1f767b 100644
--- a/templates/ceph_bench_html.j2
+++ b/templates/ceph_bench_html.j2
@@ -448,7 +448,7 @@
                 </div>
             </td>
         </tr>
-        {% for time,dt in results.items() %}
+        {% for time,dt in results.items() | sort %}
         {% set t = dt["totals"] %}
         {% set o = dt["input_options"] %}
         {% set tstripped = time | tstrip %}
@@ -700,7 +700,7 @@
                 </div>
             </td>
         </tr>
-        {% for time,dt in results.items() %}
+        {% for time,dt in results.items() | sort %}
         {% set b = dt["osd_summary"]["before"] %}
         {% set a = dt["osd_summary"]["after"] %}
         {% set s = dt["osd_summary"]["active"] %}
diff --git a/templates/ceph_info_html.j2 b/templates/ceph_info_html.j2
index ac4d83f..49371df 100644
--- a/templates/ceph_info_html.j2
+++ b/templates/ceph_info_html.j2
@@ -142,7 +142,7 @@
         }
         td > .osdconf_group {
             display: inline-block;
-            grid-template-columns: repeat(3, auto);
+            grid-template-columns: repeat(4, auto);
             padding-left: 0px;
             padding-right: 0px;
             margin: 1px;
@@ -227,9 +227,13 @@
         .lat_apply { border-color: #a0c0c0; background-color: rgb(255, 250, 250); text-align: left;  width: 35px}
         .meta_name { border-color: #c4b890; background-color: #e7dbb6; text-align: left; width: 150px;}
         .meta_value { border-color: #c6c3ba;background-color: #d4d4d4; text-align: left; width: 480px;}
-        .conf_name { border-color: #c4b890; background-color: #e7dbb6; text-align: left; width: 295px; word-break: break-all;}
-        .conf_value { border-color: #c6c3ba;background-color: #d4d4d4; text-align: left; width: 280px; word-break: break-all;}
-        .conf_source { border-color: #c6c3ba;background-color: #a4a4a4; text-align: left; width: 50px;}
+        .conf_name, .conf_name_uniq { border-color: #c4b890; background-color: #e7dbb6; text-align: left; width: 295px; word-break: break-all;}
+        .conf_value, .conf_value_uniq { border-color: #c6c3ba;background-color: #d4d4d4; text-align: left; width: 280px; word-break: break-all;}
+        .conf_source, .conf_source_uniq { border-color: #c6c3ba;background-color: #a4a4a4; text-align: left; width: 50px;}
+        .osd_name { border-color: gray; background-color: #bdb396; text-align: left; width: 120px;}
+        .conf_name_uniq { width: 410px;}
+        .conf_value_uniq { width: 600px;}
+        .conf_source_uniq { width: 140px;}
 
         .map_grid {
             display: grid;
@@ -664,22 +668,23 @@
             </td></tr>
         </tbody></table>
     </div>
-    {% for osdname in cuniq.keys() | sort %}
-    <button type="button" class="row_button">{{ osdname }}: {{ cuniq[osdname] | length }} uniq values</button>
+    <button type="button" class="row_button">Uniq values</button>
     <div class="row_content">
         <table class="ceph_status"><tbody>
+            {% for osdname in cuniq.keys() | sort %}
             <tr><td class="metadata">
             {% for tname, tdata in cuniq[osdname].items() %}
                 <div class="osdconf_group">
-                    <div class="item conf_name">{{ tname }}</div>
-                    <div class="item conf_value">{{ tdata["value"] }}</div>
-                    <div class="item conf_source">{{ tdata["source"] }}</div>
+                    <div class="item osd_name">{{ osdname }}</div>
+                    <div class="item conf_name_uniq">{{ tname }}</div>
+                    <div class="item conf_value_uniq">{{ tdata["value"] }}</div>
+                    <div class="item conf_source_uniq">{{ tdata["source"] }}</div>
                 </div>
             {% endfor %}
             </td></tr>
+            {% endfor %}
         </tbody></table>
     </div>
-    {% endfor %}
 </div>
 {% endmacro %}
 
diff --git a/tests/test_network.py b/tests/test_network.py
index 0ac43df..31a844e 100644
--- a/tests/test_network.py
+++ b/tests/test_network.py
@@ -52,7 +52,7 @@
         self.assertEqual(
             _r_code,
             0,
-            "'mcp-net {}' command failed".format(" ".join(_args))
+            "'chk-net {}' command failed".format(" ".join(_args))
         )
 
     @patch('requests.get', side_effect=mocked_salt_get)
@@ -72,7 +72,7 @@
         self.assertEqual(
             _r_code,
             0,
-            "'mcp-net {}' command failed".format(" ".join(_args))
+            "'chk-net {}' command failed".format(" ".join(_args))
         )
 
     @patch('requests.get', side_effect=mocked_salt_get)
@@ -92,7 +92,7 @@
         self.assertEqual(
             _r_code,
             0,
-            "'mcp-net {}' command failed".format(" ".join(_args))
+            "'chk-net {}' command failed".format(" ".join(_args))
         )
 
     @patch('requests.get', side_effect=mocked_salt_get)
@@ -111,5 +111,5 @@
         self.assertEqual(
             _r_code,
             0,
-            "'mcp-net {}' command failed".format(" ".join(_args))
+            "'chk-net {}' command failed".format(" ".join(_args))
         )