Multi-env support and Kube client integration

Kube-friendly beta

Package versions check supports the Kube env

Added:
  - Env type detection
  - New option: --use-env, for selecting an env when a function
    supports multiple detected envs (see the sketch below)
  - Updated config loading
  - Supported-env check for each module and command type;
    execution stops on an unsupported env
  - Functions can support multiple envs
  - Kubernetes dependency
  - Kubernetes API detection: local and remote
  - Package checking class hierarchy for using Salt or Kube
  - Remote pod execution routine
  - Flexible SSH/SSH forwarder classes: context manager
    support, ssh.do(), etc.
  - Multithreaded SSH script execution
  - Number of workers parameter (default: 5)

Fixed:
  - Config dependency
  - Command loading with supported envs list
  - Unit test structure and execution flow updated
  - Unit test fixes
  - Debug mode handling
  - Unified command type/support routine
  - Nested attrs getter/setter

Change-Id: I3ade693ac21536e2b5dcee4b24d511749dc72759
Related-PROD: PROD-35811
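
Entrypoints now build the configuration from parsed args instead of
importing a module-level singleton. A minimal sketch of the new flow
(the "packages" command name is illustrative):

    import argparse

    from cfg_checker.cli.arguments import add_global_arguments
    from cfg_checker.cli.command import execute_command
    from cfg_checker.common.settings import CheckerConfiguration

    parser = argparse.ArgumentParser()
    add_global_arguments(parser)
    parser.add_argument("command")          # stand-in for the real subparsers
    args = parser.parse_args(["packages"])  # hypothetical command name

    config = CheckerConfiguration(args)     # runs env detection: LINUX/SALT/KUBE
    result = execute_command(args, args.command, config)
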
diff --git a/README.md b/README.md
index d231450..de53574 100644
--- a/README.md
+++ b/README.md
@@ -59,16 +59,19 @@
 - [*Done*] Add possibility to check cloud remotely
 - [*Done*] Add possibility to have several ready to use cloud connection configurations
 - [*Done*] Use flexible plugin structure to execute modules
+- [*Done*] Prepare script to create venv
+- [*Done*] Reformat reclass compare file
+- [*Done*] Finalize Network check HTML report
+- [*Done*] Do simple rule-based checks on network
+- [*Done*] Add node params to output
 
-TODO:
-- Check root on startup, exit
-- Prepare script to create venv
-- Reformat reclass compare file
-- Finalize Network check HTML report
-- Do simple rule-based checks on network
+# TODO
 - Implement simple packet sniff mechanics to check node-node VCP trafic flow
-- Add node params to output
 - Format reclass compare file
 
 
+# Kube links
+  https://github.com/kubernetes-client/python/blob/master/examples/api_discovery.py
+  https://github.com/kubernetes-client/python/blob/master/examples/remote_cluster.py
+
 Cheers!
diff --git a/cfg_checker/cfg_check.py b/cfg_checker/cfg_check.py
index 11190b4..403483e 100644
--- a/cfg_checker/cfg_check.py
+++ b/cfg_checker/cfg_check.py
@@ -2,9 +2,11 @@
 import sys
 from logging import DEBUG, INFO
 
+from cfg_checker.cli.arguments import add_global_arguments
 from cfg_checker.cli.command import execute_command, helps, parsers, \
     parsers_inits
-from cfg_checker.common import config, logger, logger_cli
+from cfg_checker.common.settings import CheckerConfiguration
+from cfg_checker.common import logger, logger_cli
 from cfg_checker.helpers.args_utils import MyParser
 
 pkg_dir = os.path.dirname(__file__)
@@ -20,33 +22,7 @@
     """
     # Main entrypoint
     parser = MyParser(prog="# Mirantis Cloud configuration checker")
-
-    parser.add_argument(
-        "-d",
-        "--debug",
-        action="store_true", default=False,
-        help="Set CLI logging level to DEBUG"
-    )
-    parser.add_argument(
-        '-s',
-        '--sudo',
-        action='store_true', default=True,
-        help="Use sudo for getting salt creds"
-    )
-
-    parser.add_argument(
-        '--skip-nodes',
-        metavar='skip_string', default=None,
-        help="String with nodes to skip. Only trailing '*' supported!"
-             " Example: 'cmp*,ctl01'"
-    )
-
-    parser.add_argument(
-        '--skip-nodes-file',
-        metavar='skip_nodes_file', default=None,
-        help="Filename with nodes to skip. Note: use fqdn node names."
-    )
-
+    add_global_arguments(parser)
     subparsers = parser.add_subparsers(dest='command')
 
     # create parsers
@@ -76,17 +52,18 @@
         parser.print_help()
         logger_cli.info("\n# Please, type a command listed above")
         sys.exit(1)
-    # Pass externally configured values
-    config.ssh_uses_sudo = args.sudo
 
-    # Handle options
+    # Handle debug option before config init
     if args.debug:
         logger_cli.setLevel(DEBUG)
     else:
         logger_cli.setLevel(INFO)
 
+    # Init the config and pass externally configured values
+    config = CheckerConfiguration(args)
+
     # Execute the command
-    result = execute_command(args, args.command)
+    result = execute_command(args, args.command, config)
     logger.debug(result)
     sys.exit(result)
 
diff --git a/cfg_checker/cli/arguments.py b/cfg_checker/cli/arguments.py
new file mode 100644
index 0000000..bd162a8
--- /dev/null
+++ b/cfg_checker/cli/arguments.py
@@ -0,0 +1,42 @@
+def add_global_arguments(parser):
+    parser.add_argument(
+        "-d",
+        "--debug",
+        action="store_true", default=False,
+        help="Set CLI logging level to DEBUG"
+    )
+    parser.add_argument(
+        '-s',
+        '--sudo',
+        action='store_true', default=True,
+        help="Use sudo for getting salt creds"
+    )
+    parser.add_argument(
+        '--force-no-key',
+        action='store_true', default=False,
+        help="Use sudo for getting salt creds"
+    )
+    parser.add_argument(
+        '--skip-nodes',
+        metavar='skip_string', default=None,
+        help="String with nodes to skip. Only trailing '*' supported!"
+             " Example: 'cmp*,ctl01'"
+    )
+    parser.add_argument(
+        '--kube-config-path',
+        metavar='skip_string', default="/root/.kube/config",
+        help="Kube config path with certificates and keys. "
+             "Default: '/root/.kube/config'"
+    )
+    parser.add_argument(
+        '--use-env',
+        metavar='use_env',
+        help="Specify env to use if function supports multiple among detected"
+             " Example: SALT/KUBE"
+    )
+
+    parser.add_argument(
+        '--skip-nodes-file',
+        metavar='skip_nodes_file', default=None,
+        help="Filename with nodes to skip. Note: use fqdn node names."
+    )
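
The new module is plain argparse; a quick sketch of what it wires up:

    import argparse

    from cfg_checker.cli.arguments import add_global_arguments

    parser = argparse.ArgumentParser(prog="example")
    add_global_arguments(parser)
    args = parser.parse_args(["--use-env", "KUBE", "--debug"])
    # the shared options every entrypoint now receives
    print(args.use_env, args.debug, args.kube_config_path, args.force_no_key)
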
diff --git a/cfg_checker/cli/command.py b/cfg_checker/cli/command.py
index f98e608..566a498 100644
--- a/cfg_checker/cli/command.py
+++ b/cfg_checker/cli/command.py
@@ -2,8 +2,11 @@
 import sys
 import traceback
 
-from cfg_checker.common import config, logger, logger_cli
-from cfg_checker.common.exception import CheckerException
+from cfg_checker.cli.arguments import add_global_arguments
+from cfg_checker.common import logger, logger_cli
+from cfg_checker.common.exception import CheckerException, \
+    CommandNotSupportedException
+from cfg_checker.common.settings import CheckerConfiguration
 from cfg_checker.helpers.args_utils import MyParser
 
 main_pkg_name = __name__.split('.')[0]
@@ -12,6 +15,7 @@
 mods_prefix = mods_import_path + '.'
 
 commands = {}
+supports = {}
 parsers_inits = {}
 parsers = {}
 helps = {}
@@ -27,16 +31,16 @@
         # A package! Create it and add commands
         commands[mod_name] = \
             [_n[3:] for _n in dir(_p) if _n.startswith("do_")]
+        supports[mod_name] = _p.supported_envs
         parsers_inits[mod_name] = getattr(_p, 'init_parser')
         parsers[mod_name] = {}
         helps[mod_name] = getattr(_p, 'command_help')
 
 
-def execute_command(args, command):
+def execute_command(args, command, config):
     # Validate the commands
     # check commands
     if command not in commands:
-        
         logger_cli.info("\n# Please, type a command listed above")
         return 1
     if not hasattr(args, 'type') or not args.type:
@@ -53,6 +57,18 @@
         )
         return 1
     else:
+        # check if command is supported
+        if len(
+            set(supports[command]).intersection(set(config.detected_envs))
+        ) < 1:
+            raise CommandNotSupportedException(
+                "'{}' is not supported on any of the currently "
+                "detected environments: {}".format(
+                    command,
+                    ",".join(config.detected_envs)
+                )
+            )
+
         # form function name to call
         _method_name = "do_" + _type
         _target_module = __import__(
@@ -63,12 +80,16 @@
 
     # Execute the command
     try:
-        _method(args)
+        # Compatibility between entrypoints
+        if not hasattr(args, 'command'):
+            args.command = command
+        _method(args, config)
         return 0
+    except CommandNotSupportedException as e:
+        logger_cli.error("\n{}".format(e.message))
+        return 1
     except CheckerException as e:
-        logger_cli.error("\nERROR: {}".format(
-            e.message
-        ))
+        logger_cli.error("\n{}".format(e.message))
 
         exc_type, exc_value, exc_traceback = sys.exc_info()
         logger_cli.debug("\n{}".format(
@@ -87,6 +108,7 @@
 
     # parse arguments
     try:
+        add_global_arguments(my_parser)
         args, unknown = my_parser.parse_known_args()
     except TypeError:
         logger_cli.info("\n# Please, check arguments")
@@ -100,10 +122,19 @@
         )
         sys.exit(1)
 
+    # Init the config
+    config = CheckerConfiguration(args)
+
     # force use of sudo
     config.ssh_uses_sudo = True
 
     # Execute the command
-    result = execute_command(args, _name)
-    logger.debug(result)
+    try:
+        result = execute_command(args, _name, config)
+        logger.debug(result)
+    except CommandNotSupportedException as e:
+        logger_cli.error("\nERROR: {}".format(
+            e.message
+        ))
+        result = 1
     sys.exit(result)
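
Command packages now declare the envs they support: the loader stores
each module's supported_envs and intersects it with config.detected_envs
before dispatch, and do_* handlers receive the config explicitly. A
minimal command module under this contract might look like (sketch;
module and handler names are illustrative):

    # cfg_checker/modules/example/__init__.py (hypothetical)
    from cfg_checker.common.settings import ENV_TYPE_KUBE, ENV_TYPE_SALT

    command_help = "Example command"
    supported_envs = [ENV_TYPE_SALT, ENV_TYPE_KUBE]

    def init_parser(parser):
        # add command-specific subparsers/arguments here
        return parser

    def do_report(args, config):
        # handlers now take (args, config) instead of reading a global
        print("detected envs:", config.detected_envs)
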
diff --git a/cfg_checker/clients/__init__.py b/cfg_checker/clients/__init__.py
index 86d731f..5d3a48d 100644
--- a/cfg_checker/clients/__init__.py
+++ b/cfg_checker/clients/__init__.py
@@ -1,8 +1,11 @@
 from cfg_checker.common import logger
 from cfg_checker.common.salt_utils import SaltRemote
+from cfg_checker.common.kube_utils import KubeRemote
+
 
 # instance of the salt client
 salt = None
+kube = None
 
 
 def get_salt_remote(config):
@@ -20,6 +23,24 @@
     logger.info("Creating salt remote instance")
     # create it once
     if salt is None:
-        salt = SaltRemote()
+        salt = SaltRemote(config)
     # return once required
     return salt
+
+
+def get_kube_remote(config):
+    """Singleton-like creation of instance
+
+    Arguments:
+        config {base_config} -- an instance to base_config
+            with creds and params
+
+    Returns:
+        KubeRemote -- instance of kube client
+    """
+    global kube
+    logger.info("Creating kube remote client instance")
+    # Create it once
+    if kube is None:
+        kube = KubeRemote(config)
+    return kube
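
Both getters cache the created client at module level, so repeated calls
reuse one instance (sketch; config is an initialized CheckerConfiguration):

    from cfg_checker.clients import get_kube_remote

    kube = get_kube_remote(config)           # created on first call...
    assert kube is get_kube_remote(config)   # ...reused afterwards
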
diff --git a/cfg_checker/common/__init__.py b/cfg_checker/common/__init__.py
index 752373f..6eecf92 100644
--- a/cfg_checker/common/__init__.py
+++ b/cfg_checker/common/__init__.py
@@ -2,8 +2,6 @@
 
 from cfg_checker.common.other import Utils
 
-from cfg_checker.common.settings import config
-
 
 def nested_set(_d, _keys, _value):
     # # import and deepcopy for safety
@@ -18,4 +16,3 @@
 utils = Utils()
 logger = logger
 logger_cli = logger_cli
-config = config
diff --git a/cfg_checker/common/config_file.py b/cfg_checker/common/config_file.py
index c70e5a6..513e0ec 100644
--- a/cfg_checker/common/config_file.py
+++ b/cfg_checker/common/config_file.py
@@ -2,11 +2,10 @@
 import os
 
 from . import logger_cli
+from .const import truth
 
 
 class ConfigFile(object):
-    _truth = ['true', '1', 't', 'y', 'yes', 'yeah', 'yup',
-              'certainly', 'uh-huh']
     _config = None
     _section_name = None
     _config_filepath = None
@@ -45,7 +44,7 @@
             return path
 
     def _ensure_boolean(self, _value):
-        if _value.lower() in self._truth:
+        if _value.lower() in truth:
             return True
         else:
             return False
diff --git a/cfg_checker/common/const.py b/cfg_checker/common/const.py
index 3b17099..2ae41f6 100644
--- a/cfg_checker/common/const.py
+++ b/cfg_checker/common/const.py
@@ -54,7 +54,10 @@
 
 uknown_code = "unk"
 
-all_roles_map = {
+ENV_TYPE_KUBE = "salt"
+ENV_TYPE_KUBE = "kube"
+
+all_salt_roles_map = {
     "apt": "repository",
     "bmk": "validation",
     "cfg": "master",
@@ -98,3 +101,38 @@
 _mainteiners_index_filename = "mainteiners.json"
 _mirantis_versions_filename = "mirantis_v.json"
 _other_versions_filename = "other_v.json"
+
+all_kube_roles_map = {
+    'node-role.kubernetes.io/master': "k8s-master",
+    'openstack-compute-node': "os-cmp",
+    'openstack-control-plane': "os-ctl",
+    'openstack-gateway': "os-gtw",
+    'openvswitch': "ovs",
+    'local-volume-provisioner': "",
+    'ceph_role_mgr': "ceph-mgr",
+    'ceph_role_mon': "ceph-mon",
+    'com.docker.ucp.collection.shared': "ucp-shared",
+    'com.docker.ucp.collection.system': "ucp-system",
+    'com.docker.ucp.collection.swarm': "ucp-swarm",
+    'com.docker.ucp.collection.root': "ucp-root",
+}
+
+truth = ['true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh']
+
+ubuntu_versions = {
+    "20.10": "Groovy Gorilla",
+    "20.04": "Focal Fossa",
+    "18.04": "Bionic Beaver",
+    "16.04": "Xenial Xerus",
+    "14.04": "Trusty Tahr",
+}
+
+nova_openstack_versions = {
+    "23": "wallaby",
+    "22": "victoria",
+    "21": "ussuri",
+    "20": "train",
+    "19": "stein",
+    "18": "rocky",
+    "17": "queens"
+}
diff --git a/cfg_checker/common/exception.py b/cfg_checker/common/exception.py
index 2536099..4ee3a99 100644
--- a/cfg_checker/common/exception.py
+++ b/cfg_checker/common/exception.py
@@ -19,18 +19,50 @@
         self.message = "# Configuration error: {}".format(message)
 
 
+class CommandNotSupportedException(CheckerException):
+    def __init__(self, message, *args, **kwargs):
+        super(CommandNotSupportedException, self).__init__(
+            message,
+            *args,
+            **kwargs
+        )
+        self.message = "# Command not supported: {}".format(message)
+
+
+class CommandTypeNotSupportedException(CheckerException):
+    def __init__(self, message, *args, **kwargs):
+        super(CommandTypeNotSupportedException, self).__init__(
+            message,
+            *args,
+            **kwargs
+        )
+        self.message = "# Command type not supported: {}".format(message)
+
+
 class SaltException(CheckerException):
     def __init__(self, message, *args, **kwargs):
         super(SaltException, self).__init__(message, *args, **kwargs)
         self.message = "# Salt error: {}".format(message)
 
 
+class KubeException(CheckerException):
+    def __init__(self, message, *args, **kwargs):
+        super(KubeException, self).__init__(message, *args, **kwargs)
+        self.message = "# Kube client error: {}".format(message)
+
+
 class InvalidReturnException(CheckerException):
     def __init__(self, message, *args, **kwargs):
         super(InvalidReturnException, self).__init__(message, *args, **kwargs)
         self.message = "# Unexpected return value: {}".format(message)
 
 
+class TimeoutException(CheckerException):
+    def __init__(self, message, *args, **kwargs):
+        super(TimeoutException, self).__init__(message, *args, **kwargs)
+        self.message = "# Timed out waiting: {}".format(message)
+
+
 class ErrorMappingException(CheckerException):
     def __init__(self, message, *args, **kwargs):
         super(ErrorMappingException, self).__init__(message, *args, **kwargs)
diff --git a/cfg_checker/common/file_utils.py b/cfg_checker/common/file_utils.py
index 398ea66..6fbb675 100644
--- a/cfg_checker/common/file_utils.py
+++ b/cfg_checker/common/file_utils.py
@@ -1,11 +1,12 @@
+import atexit
 import grp
 import os
 import pwd
+import tempfile
 import time
 
-from cfg_checker.common import config
-
-_default_time_format = config.date_format
+_default_time_format = "%Y-%m-%d %H:%M:%S.%f%z"
+_temp_files = {}
 
 
 def remove_file(filename):
@@ -110,3 +111,35 @@
         return "... folder '{}' removed".format(_folder)
     else:
         return "... folder '{}' not exists".format(_folder)
+
+
+def _cleanup_temp_files():
+    global _temp_files
+    for temp_file in _temp_files.values():
+        try:
+            os.remove(temp_file)
+        except OSError:
+            pass
+    _temp_files = {}
+
+
+def create_temp_file_with_content(content, mode=None):
+    if len(_temp_files) == 0:
+        atexit.register(_cleanup_temp_files)
+    # Because we may change context several times, try to remember files we
+    # created and reuse them at a small memory cost.
+    content_key = hash(content)
+    if content_key in _temp_files:
+        return _temp_files[content_key]
+
+    # new file, create it
+    _, name = tempfile.mkstemp()
+    _temp_files[content_key] = name
+    with open(name, 'wb') as fd:
+        fd.write(content.encode() if isinstance(content, str) else content)
+
+    # set mode for the file
+    if mode:
+        os.chmod(name, mode)
+
+    return name
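
The helper deduplicates by content hash and registers a single atexit
cleanup for everything it created; for example:

    from cfg_checker.common.file_utils import create_temp_file_with_content

    path_a = create_temp_file_with_content("key material", mode=0o600)
    path_b = create_temp_file_with_content("key material")
    assert path_a == path_b  # same content -> same cached temp file
    # all files created here are removed automatically at interpreter exit
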
diff --git a/cfg_checker/common/kube_utils.py b/cfg_checker/common/kube_utils.py
new file mode 100644
index 0000000..3f56f0e
--- /dev/null
+++ b/cfg_checker/common/kube_utils.py
@@ -0,0 +1,280 @@
+"""
+Module to handle interaction with Kube
+"""
+import base64
+import os
+import urllib3
+import yaml
+
+from kubernetes import client as kclient, config as kconfig
+from kubernetes.stream import stream
+
+from cfg_checker.common import logger, logger_cli
+from cfg_checker.common.exception import InvalidReturnException, KubeException
+from cfg_checker.common.file_utils import create_temp_file_with_content
+from cfg_checker.common.other import utils, shell
+from cfg_checker.common.ssh_utils import ssh_shell_p
+
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+
+def _init_kube_conf_local(config):
+    # Init kube library locally
+    try:
+        kconfig.load_kube_config()
+        logger_cli.debug(
+            "...found Kube env: core, {}". format(
+                ",".join(
+                    kclient.CoreApi().get_api_versions().versions
+                )
+            )
+        )
+        return kconfig, kclient.ApiClient()
+    except Exception as e:
+        logger.warn("Failed to init local Kube client: {}".format(
+                str(e)
+            )
+        )
+        return None, None
+
+
+def _init_kube_conf_remote(config):
+    # init remote client
+    # Preload Kube token
+    """
+    APISERVER=$(kubectl config view --minify |
+            grep server | cut -f 2- -d ":" | tr -d " ")
+    SECRET_NAME=$(kubectl get secrets |
+            grep ^default | cut -f1 -d ' ')
+    TOKEN=$(kubectl describe secret $SECRET_NAME |
+            grep -E '^token' | cut -f2 -d':' | tr -d " ")
+
+    echo "Detected API Server at: '${APISERVER}'"
+    echo "Got secret: '${SECRET_NAME}'"
+    echo "Loaded token: '${TOKEN}'"
+
+    curl $APISERVER/api
+        --header "Authorization: Bearer $TOKEN" --insecure
+    """
+
+    _c_data = ssh_shell_p(
+        "sudo cat " + config.kube_config_path,
+        config.ssh_host,
+        username=config.ssh_user,
+        keypath=config.ssh_key,
+        piped=False,
+        use_sudo=config.ssh_uses_sudo,
+    )
+
+    _conf = yaml.load(_c_data, Loader=yaml.SafeLoader)
+
+    _kube_conf = kclient.Configuration()
+    # A remote host configuration
+
+    # To work with remote cluster, we need to extract these
+    # keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl']
+    # When v12 of the client will be release, we will use load_from_dict
+
+    _kube_conf.ssl_ca_cert = create_temp_file_with_content(
+        base64.standard_b64decode(
+            _conf['clusters'][0]['cluster']['certificate-authority-data']
+        )
+    )
+    _host = _conf['clusters'][0]['cluster']['server']
+    _kube_conf.cert_file = create_temp_file_with_content(
+        base64.standard_b64decode(
+            _conf['users'][0]['user']['client-certificate-data']
+        )
+    )
+    _kube_conf.key_file = create_temp_file_with_content(
+        base64.standard_b64decode(
+            _conf['users'][0]['user']['client-key-data']
+        )
+    )
+    if "http" not in _host or "443" not in _host:
+        logger_cli.error(
+            "Failed to extract Kube host: '{}'".format(_host)
+        )
+    else:
+        logger_cli.debug(
+            "...'context' host extracted: '{}' via SSH@{}".format(
+                _host,
+                config.ssh_host
+            )
+        )
+
+    # Substitute context host to ours
+    _tmp = _host.split(':')
+    _kube_conf.host = \
+        _tmp[0] + "://" + config.mcp_host + ":" + _tmp[2]
+    config.kube_port = _tmp[2]
+    logger_cli.debug(
+        "...kube remote host updated to {}".format(
+            _kube_conf.host
+        )
+    )
+    _kube_conf.verify_ssl = False
+    _kube_conf.debug = config.debug
+    # Nevertheless if you want to do it
+    # you can with these 2 parameters
+    # configuration.verify_ssl=True
+    # ssl_ca_cert is the filepath
+    # to the file that contains the certificate.
+    # configuration.ssl_ca_cert="certificate"
+
+    # _kube_conf.api_key = {
+    #     "authorization": "Bearer " + config.kube_token
+    # }
+
+    # Create a ApiClient with our config
+    _kube_api = kclient.ApiClient(_kube_conf)
+
+    return _kube_conf, _kube_api
+
+
+class KubeApi(object):
+    def __init__(self, config):
+        self.config = config
+        self._init_kclient()
+        self.last_response = None
+
+    def _init_kclient(self):
+        # use the local kube config when env is local, remote otherwise
+        logger_cli.debug("# Initializing Kube config...")
+        if self.config.env_name == "local":
+            self.kConf, self.kApi = _init_kube_conf_local(self.config)
+            self.is_local = True
+            # Load local config data
+            if os.path.exists(self.config.kube_config_path):
+                _c_data = shell("sudo cat " + self.config.kube_config_path)
+                _conf = yaml.load(_c_data, Loader=yaml.SafeLoader)
+                self.user_keypath = create_temp_file_with_content(
+                    base64.standard_b64decode(
+                        _conf['users'][0]['user']['client-key-data']
+                    )
+                )
+                self.yaml_conf = _c_data
+        else:
+            self.kConf, self.kApi = _init_kube_conf_remote(self.config)
+            self.is_local = False
+
+    def get_versions_api(self):
+        # client.CoreApi().get_api_versions().versions
+        return kclient.VersionApi(self.kApi)
+
+
+class KubeRemote(KubeApi):
+    def __init__(self, config):
+        super(KubeRemote, self).__init__(config)
+        self._coreV1 = None
+
+    @property
+    def CoreV1(self):
+        if not self._coreV1:
+            self._coreV1 = kclient.CoreV1Api(self.kApi)
+        return self._coreV1
+
+    @staticmethod
+    def _typed_list_to_dict(i_list):
+        _dict = {}
+        for _item in i_list:
+            _d = _item.to_dict()
+            _type = _d.pop("type")
+            _dict[_type.lower()] = _d
+
+        return _dict
+
+    @staticmethod
+    def _get_listed_attrs(items, _path):
+        _list = []
+        for _n in items:
+            _list.append(utils.rgetattr(_n, _path))
+
+        return _list
+
+    def get_node_info(self, http=False):
+        # Query API for the nodes and do some presorting
+        _nodes = {}
+        if http:
+            _raw_nodes = self.CoreV1.list_node_with_http_info()
+        else:
+            _raw_nodes = self.CoreV1.list_node()
+
+        if not isinstance(_raw_nodes, kclient.models.v1_node_list.V1NodeList):
+            raise InvalidReturnException(
+                "Invalid return type: '{}'".format(type(_raw_nodes))
+            )
+
+        for _n in _raw_nodes.items:
+            _name = _n.metadata.name
+            _d = _n.to_dict()
+            # parse inner data classes as dicts
+            _d['addresses'] = self._typed_list_to_dict(_n.status.addresses)
+            _d['conditions'] = self._typed_list_to_dict(_n.status.conditions)
+            # Update 'status' type
+            if isinstance(_d['conditions']['ready']['status'], str):
+                _d['conditions']['ready']['status'] = utils.to_bool(
+                    _d['conditions']['ready']['status']
+                )
+            # Parse image names?
+            # TODO: Here is the place where we can parse each node image names
+
+            # Parse roles
+            _d['labels'] = {}
+            for _label, _data in _d["metadata"]["labels"].items():
+                if _data.lower() in ["true", "false"]:
+                    _d['labels'][_label] = utils.to_bool(_data)
+                else:
+                    _d['labels'][_label] = _data
+
+            # Save
+            _nodes[_name] = _d
+
+        # debug report on how many nodes detected
+        logger_cli.debug("...node items returned '{}'".format(len(_nodes)))
+
+        return _nodes
+
+    def exec_on_target_pod(
+        self,
+        cmd,
+        pod_name,
+        namespace,
+        strict=False,
+        _request_timeout=120,
+        **kwargs
+    ):
+        _pods = self.CoreV1.list_namespaced_pod(namespace)
+        _names = self._get_listed_attrs(_pods.items, "metadata.name")
+
+        _pname = ""
+        if not strict:
+            _pname = [n for n in _names if n.startswith(pod_name)]
+            if len(_pname) > 1:
+                logger_cli.debug(
+                    "...more than one pod found for '{}': {}\n"
+                    "...using first one".format(
+                        pod_name,
+                        ", ".join(_pname)
+                    )
+                )
+                _pname = _pname[0]
+            elif len(_pname) < 1:
+                raise KubeException("No pods found for '{}'".format(pod_name))
+        else:
+            _pname = pod_name
+
+        _r = stream(
+            self.CoreV1.connect_get_namespaced_pod_exec,
+            _pname,
+            namespace,
+            command=cmd.split(),
+            stderr=True,
+            stdin=False,
+            stdout=True,
+            tty=False,
+            _request_timeout=_request_timeout,
+            **kwargs
+        )
+
+        return _r
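
Typical use of the new client (sketch; config is an initialized
CheckerConfiguration, and the pod/namespace names are illustrative):

    from cfg_checker.clients import get_kube_remote

    kube = get_kube_remote(config)

    # node listing with presorted addresses/conditions/labels
    nodes = kube.get_node_info()
    for _name, _data in nodes.items():
        print(_name, _data["conditions"]["ready"]["status"])

    # non-strict match: first pod whose name starts with the given prefix
    print(kube.exec_on_target_pod("uname -a", "some-tools-pod", "some-ns"))
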
diff --git a/cfg_checker/common/log.py b/cfg_checker/common/log.py
index 6edac2f..4c1c02c 100644
--- a/cfg_checker/common/log.py
+++ b/cfg_checker/common/log.py
@@ -91,6 +91,6 @@
     'cfg_checker',
     log_fname=os.path.join(
         pkg_dir,
-        os.getenv('LOGFILE', 'cfg_checker.log')
+        os.getenv('MCP_LOGFILE', 'cfg_checker.log')
     )
 )
diff --git a/cfg_checker/common/other.py b/cfg_checker/common/other.py
index 5a4c552..e3a3271 100644
--- a/cfg_checker/common/other.py
+++ b/cfg_checker/common/other.py
@@ -1,8 +1,11 @@
+import functools
 import os
 import re
 import subprocess
 
-from cfg_checker.common.const import all_roles_map, uknown_code
+from cfg_checker.common import logger_cli
+from cfg_checker.common.const import all_salt_roles_map, uknown_code, \
+    truth
 from cfg_checker.common.exception import ConfigException
 
 pkg_dir = os.path.dirname(__file__)
@@ -11,10 +14,22 @@
 pkg_dir = os.path.abspath(pkg_dir)
 
 
+# 'Dirty' and simple way to execute piped cmds
+def piped_shell(command):
+    logger_cli.debug("...cmd:'{}'".format(command))
+    _code, _out = subprocess.getstatusoutput(command)
+    if _code:
+        logger_cli.error("Non-zero return code: {}, '{}'".format(_code, _out))
+    return _out
+
+
+# 'Proper' way to execute shell
 def shell(command):
+    logger_cli.debug("...cmd:'{}'".format(command))
     _ps = subprocess.Popen(
         command.split(),
-        stdout=subprocess.PIPE
+        stdout=subprocess.PIPE,
+        universal_newlines=False
     ).communicate()[0].decode()
 
     return _ps
@@ -69,7 +84,7 @@
         # node role code checks
         _code = re.findall(r"[a-zA-Z]+", fqdn.split('.')[0])
         if len(_code) > 0:
-            if _code[0] in all_roles_map:
+            if _code[0] in all_salt_roles_map:
                 return _result()
             else:
                 # log warning here
@@ -107,11 +122,11 @@
         if _isvalid:
             # try to match it with ones in map
             _c = _code[0]
-            match = any([r in _c for r in all_roles_map.keys()])
+            match = any([r in _c for r in all_salt_roles_map.keys()])
             if match:
                 # no match, try to find it
                 match = False
-                for r in all_roles_map.keys():
+                for r in all_salt_roles_map.keys():
                     _idx = _c.find(r)
                     if _idx > -1:
                         _c = _c[_idx:]
@@ -153,5 +168,25 @@
 
         return _valid, _invalid
 
+    @staticmethod
+    def to_bool(value):
+        if value.lower() in truth:
+            return True
+        else:
+            return False
+
+    # helper functions to get nested attrs
+    # https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-subobjects-chained-properties
+    # using wonder's beautiful simplification:
+    # https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects/31174427?noredirect=1#comment86638618_31174427
+    def rsetattr(self, obj, attr, val):
+        pre, _, post = attr.rpartition('.')
+        return setattr(self.rgetattr(obj, pre) if pre else obj, post, val)
+
+    def rgetattr(self, obj, attr, *args):
+        def _getattr(obj, attr):
+            return getattr(obj, attr, *args)
+        return functools.reduce(_getattr, [obj] + attr.split('.'))
+
 
 utils = Utils()
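
The nested attribute helpers walk dotted paths via functools.reduce:

    from cfg_checker.common.other import utils

    class Leaf:
        value = 1

    class Root:
        leaf = Leaf()

    root = Root()
    print(utils.rgetattr(root, "leaf.value"))  # -> 1
    utils.rsetattr(root, "leaf.value", 2)
    print(utils.rgetattr(root, "leaf.value"))  # -> 2
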
diff --git a/cfg_checker/common/salt_utils.py b/cfg_checker/common/salt_utils.py
index b3f5cae..f7ea50b 100644
--- a/cfg_checker/common/salt_utils.py
+++ b/cfg_checker/common/salt_utils.py
@@ -7,12 +7,13 @@
 
 import requests
 
-from cfg_checker.common import config, logger, logger_cli
+from cfg_checker.common import logger, logger_cli
 from cfg_checker.common.exception import InvalidReturnException, SaltException
 from cfg_checker.common.other import shell
+from cfg_checker.common.ssh_utils import ssh_shell_p
 
 
-def _extract_password(_raw):
+def _extract_salt_password(_raw):
     if not isinstance(_raw, str):
         raise InvalidReturnException(_raw)
     else:
@@ -26,45 +27,40 @@
     return _json["local"]
 
 
-def get_remote_env_password():
+def get_remote_salt_env_password(config):
     """Uses ssh call with configured options to get password from salt master
 
     :return: password string
     """
     _salt_cmd = "salt-call --out=json pillar.get _param:salt_api_password"
-    _ssh_cmd = ["ssh"]
-    # Build SSH cmd
-    if config.ssh_key:
-        _ssh_cmd.append("-i " + config.ssh_key)
-    if config.ssh_user:
-        _ssh_cmd.append(config.ssh_user+'@'+config.ssh_host)
-    else:
-        _ssh_cmd.append(config.ssh_host)
-    if config.ssh_uses_sudo:
-        _ssh_cmd.append("sudo")
-
-    _ssh_cmd.append(_salt_cmd)
-    _ssh_cmd = " ".join(_ssh_cmd)
-    logger_cli.debug("... calling salt: '{}'".format(_ssh_cmd))
+    logger_cli.debug("... calling salt using ssh: '{}'".format(_salt_cmd))
     try:
-        _result = shell(_ssh_cmd)
+        _result = ssh_shell_p(
+            _salt_cmd,
+            config.ssh_host,
+            username=config.ssh_user,
+            keypath=config.ssh_key,
+            piped=False,
+            use_sudo=config.ssh_uses_sudo,
+            silent=True
+        )
         if len(_result) < 1:
             raise InvalidReturnException(
                 "# Empty value returned for '{}".format(
-                    _ssh_cmd
+                    _salt_cmd
                 )
             )
         else:
-            return _extract_password(_result)
+            return _extract_salt_password(_result)
     except OSError as e:
         raise SaltException(
             "Salt error calling '{}': '{}'\n"
-            "\nConsider checking 'SALT_ENV' "
-            "and '<pkg>/etc/<env>.env' files".format(_ssh_cmd, e.strerror)
+            "\nConsider checking 'MCP_ENV' "
+            "and '<pkg>/etc/<env>.env' files".format(_salt_cmd, e.strerror)
         )
 
 
-def get_local_password():
+def get_salt_local_password(config):
     """Calls salt locally to get password from the pillar
 
     :return: password string
@@ -80,10 +76,10 @@
     except OSError as e:
         raise SaltException(
             "Salt error calling '{}': '{}'\n"
-            "\nConsider checking 'SALT_ENV' "
+            "\nConsider checking 'MCP_ENV' "
             "and '<pkg>/etc/<env>.env' files".format(_cmd, e.strerror)
         )
-    return _extract_password(_result)
+    return _extract_salt_password(_result)
 
 
 def list_to_target_string(node_list, separator):
@@ -94,9 +90,6 @@
 
 
 class SaltRest(object):
-    _host = config.salt_host
-    _port = config.salt_port
-    uri = "http://" + config.salt_host + ":" + config.salt_port
     _auth = {}
 
     default_headers = {
@@ -105,7 +98,13 @@
         'X-Auth-Token': None
     }
 
-    def __init__(self):
+    def __init__(self, config):
+        self.config = config
+
+        self._host = config.mcp_host
+        self._port = config.salt_port
+        self.uri = "http://" + config.mcp_host + ":" + config.salt_port
+
         self._token = self._login()
         self.last_response = None
 
@@ -154,12 +153,12 @@
 
     def _login(self):
         # if there is no password - try to get local, if this available
-        if config.salt_env == "local":
-            _pass = get_local_password()
+        if self.config.env_name == "local":
+            _pass = get_salt_local_password(self.config)
         else:
-            _pass = get_remote_env_password()
+            _pass = get_remote_salt_env_password(self.config)
         login_payload = {
-            'username': config.salt_user,
+            'username': self.config.salt_user,
             'password': _pass,
             'eauth': 'pam'
         }
@@ -212,8 +211,8 @@
 class SaltRemote(SaltRest):
     master_node = ""
 
-    def __init__(self):
-        super(SaltRemote, self).__init__()
+    def __init__(self, config):
+        super(SaltRemote, self).__init__(config)
 
     def cmd(
             self,
@@ -226,7 +225,7 @@
             tgt_type=None,
             timeout=None
     ):
-        _timeout = timeout if timeout is not None else config.salt_timeout
+        _timeout = timeout if timeout is not None else self.config.salt_timeout
         _payload = {
             'fun': fun,
             'tgt': tgt,
@@ -256,7 +255,7 @@
         _payload = {
             'client': 'runner',
             'fun': fun,
-            'timeout': config.salt_timeout
+            'timeout': self.config.salt_timeout
         }
 
         if kwarg:
@@ -275,7 +274,7 @@
         _payload = {
             'client': 'wheel',
             'fun': fun,
-            'timeout': config.salt_timeout
+            'timeout': self.config.salt_timeout
         }
 
         if arg:
@@ -358,11 +357,13 @@
 
         :return: json result from salt test.ping
         """
-        if config.skip_nodes:
-            logger.info("# Nodes to be skipped: {0}".format(config.skip_nodes))
+        if self.config.skip_nodes:
+            logger.info(
+                "# Nodes to be skipped: {0}".format(self.config.skip_nodes)
+            )
             _r = self.cmd(
                 '* and not ' + list_to_target_string(
-                    config.skip_nodes,
+                    self.config.skip_nodes,
                     'and not'
                 ),
                 'test.ping',
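
The Salt client now receives the configuration explicitly as well
(sketch; config is an initialized CheckerConfiguration and a reachable
salt-api is required for the login to succeed):

    from cfg_checker.common.salt_utils import SaltRemote

    salt = SaltRemote(config)  # logs in via pam using the pillar password
    print(salt.uri)            # http://<MCP_ENV_HOST>:<MCP_SALT_PORT>
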
diff --git a/cfg_checker/common/settings.py b/cfg_checker/common/settings.py
index cca5142..deebbc0 100644
--- a/cfg_checker/common/settings.py
+++ b/cfg_checker/common/settings.py
@@ -1,11 +1,15 @@
 import os
+import json
+import pwd
 import sys
 
 from cfg_checker.common.exception import ConfigException
-
 from cfg_checker.common.log import logger_cli
 
-from cfg_checker.common.other import utils
+from cfg_checker.common.other import utils, shell
+from cfg_checker.common.ssh_utils import ssh_shell_p
+
+from cfg_checker.clients import get_kube_remote
 
 pkg_dir = os.path.dirname(__file__)
 pkg_dir = os.path.join(pkg_dir, os.pardir, os.pardir)
@@ -14,6 +18,31 @@
 
 _default_work_folder = os.path.normpath(pkg_dir)
 
+ENV_TYPE_GLOB = "MCP"
+ENV_TYPE_SALT = "SALT"
+ENV_TYPE_KUBE = "KUBE"
+ENV_TYPE_LINUX = "LINUX"
+
+ENV_LOCAL = "local"
+
+supported_envs = [ENV_TYPE_LINUX, ENV_TYPE_SALT, ENV_TYPE_KUBE]
+
+
+def _extract_salt_return(_raw):
+    if not isinstance(_raw, str):
+        _json = _raw
+        logger_cli.debug("...ambigious return detected")
+    else:
+        try:
+            _json = json.loads(_raw)
+        except ValueError:
+            _json = _raw
+            logger_cli.debug(
+                "...return value is not a json: '{}'".format(_raw)
+            )
+
+    return _json
+
 
 class CheckerConfiguration(object):
     @staticmethod
@@ -28,10 +57,117 @@
         else:
             return None
 
-    def _init_values(self):
+    def _detect(self, _type):
+        logger_cli.debug("...detecting '{}'".format(_type))
+        if _type is None:
+            raise ConfigException("# Unexpected supported env type")
+        elif _type == ENV_TYPE_SALT:
+            # Detect salt env
+            _detect_cmd = ["curl", "-s"]
+            _detect_cmd.append(
+                "http://" + self.mcp_host + ':' + self.salt_port
+            )
+            # Try to call salt API on target host
+            _r = None
+            logger_cli.debug("...trying to detect env type '{}'".format(_type))
+            if self.env_name == ENV_LOCAL:
+                _r = shell(" ".join(_detect_cmd))
+            else:
+                _r = ssh_shell_p(
+                    " ".join(_detect_cmd),
+                    self.ssh_host,
+                    username=self.ssh_user,
+                    keypath=self.ssh_key,
+                    piped=False,
+                    use_sudo=self.ssh_uses_sudo,
+                    silent=True
+                )
+            # Parse return
+            _r = _extract_salt_return(_r)
+
+            if len(_r) < 1:
+                return False
+            elif _r["return"] == "Welcome":
+                return True
+            else:
+                return False
+        elif _type == ENV_TYPE_KUBE:
+            _kube = get_kube_remote(self)
+            try:
+                _vApi = _kube.get_versions_api()
+                _v = _vApi.get_code()
+                if hasattr(_v, "platform") and \
+                        hasattr(_v, "major") and \
+                        hasattr(_v, "minor"):
+                    _host = "localhost" if _kube.is_local else _kube.kConf.host
+                    logger_cli.info(
+                        "# Kube server found: {}:{} on '{}'".format(
+                            _v.major,
+                            _v.minor,
+                            _host
+                        )
+                    )
+                    return True
+                else:
+                    return False
+            except Exception as e:
+                logger_cli.warn(
+                    "# Unexpected error finding Kube env: '{}' ".format(
+                        str(e)
+                    )
+                )
+                return False
+        elif _type == ENV_TYPE_LINUX:
+            # Detect Linux env
+            from platform import system, release
+            _s = system()
+            _r = release()
+            logger_cli.debug("...running on {} {}".format(_s, _r))
+            if _s in ['Linux', 'Darwin']:
+                return True
+            else:
+                return False
+        else:
+            raise ConfigException(
+                "# Env type of '{}' is not supported".format(
+                    _type
+                )
+            )
+
+    def _detect_types(self):
+        """Try to detect env type based on the name
+        """
+        self.detected_envs = []
+        logger_cli.info('# Detecting env types')
+        for _env in supported_envs:
+            if self._detect(_env):
+                logger_cli.info("# '{}' found".format(_env))
+                self.detected_envs.append(_env)
+            else:
+                logger_cli.info("# '{}' not found".format(_env))
+
+        return
+
+    def _init_mcp_values(self):
         """Load values from environment variables or put default ones
         """
-
+        # filter vars and preload if needed
+        self.salt_vars = []
+        self.kube_vars = []
+        for _key, _value in self.vars:
+            if _key.startswith(ENV_TYPE_GLOB):
+                os.environ[_key] = _value
+            elif _key.startswith(ENV_TYPE_SALT):
+                self.salt_vars.append([_key, _value])
+            elif _key.startswith(ENV_TYPE_KUBE):
+                self.kube_vars.append([_key, _value])
+            else:
+                logger_cli.warn(
+                    "Unsupported config variable: '{}={}'".format(
+                        _key,
+                        _value
+                    )
+                )
         self.name = "CheckerConfig"
         self.working_folder = os.environ.get(
             'CFG_TESTS_WORK_DIR',
@@ -43,24 +179,86 @@
         self.pkg_versions_map = 'versions_map.csv'
 
         self.ssh_uses_sudo = False
-        self.ssh_key = os.environ.get('SSH_KEY', None)
-        self.ssh_user = os.environ.get('SSH_USER', None)
-        self.ssh_host = os.environ.get('SSH_HOST', None)
+        self.ssh_key = os.environ.get('MCP_SSH_KEY', None)
+        self.ssh_user = os.environ.get('MCP_SSH_USER', None)
+        self.ssh_host = os.environ.get('MCP_SSH_HOST', None)
 
-        self.salt_host = os.environ.get('SALT_URL', None)
-        self.salt_port = os.environ.get('SALT_PORT', '6969')
-        self.salt_user = os.environ.get('SALT_USER', 'salt')
-        self.salt_timeout = os.environ.get('SALT_TIMEOUT', 30)
-        self.salt_file_root = os.environ.get('SALT_FILE_ROOT', None)
-        self.salt_scripts_folder = os.environ.get(
-            'SALT_SCRIPTS_FOLDER',
-            'cfg_checker_scripts'
-        )
+        self.mcp_host = os.environ.get('MCP_ENV_HOST', None)
+        self.salt_port = os.environ.get('MCP_SALT_PORT', '6969')
+        self.threads = int(os.environ.get('MCP_THREADS', "5"))
 
         self.skip_nodes = utils.node_string_to_list(os.environ.get(
             'CFG_SKIP_NODES',
             None
         ))
+        # prebuild user data and folder path
+        self.pw_user = pwd.getpwuid(os.getuid())
+        if self.env_name == "local":
+            pass
+        else:
+            if not self.ssh_key and not self.force_no_key:
+                raise ConfigException(
+                    "Please, supply a key for the cluster's master node. "
+                    "Use MCP_SSH_KEY, see 'etc/example.env'"
+                )
+
+    def _init_env_values(self):
+        if ENV_TYPE_SALT in self.detected_envs:
+            for _key, _value in self.salt_vars:
+                os.environ[_key] = _value
+
+            self.salt_user = os.environ.get('SALT_USER', 'salt')
+            self.salt_timeout = os.environ.get('SALT_TIMEOUT', 30)
+            self.salt_file_root = os.environ.get('SALT_FILE_ROOT', None)
+            self.salt_scripts_folder = os.environ.get(
+                'SALT_SCRIPTS_FOLDER',
+                'cfg_checker_scripts'
+            )
+        elif ENV_TYPE_KUBE in self.detected_envs:
+            for _key, _value in self.kube_vars:
+                os.environ[_key] = _value
+
+            self.kube_config_root = os.environ.get('KUBE_CONFIG_ROOT', None)
+            self.kube_scripts_folder = os.environ.get(
+                'KUBE_SCRIPTS_FOLDER',
+                None
+            )
+            self.kube_node_user = os.environ.get(
+                'KUBE_NODE_USER',
+                'ubuntu'
+            )
+            self.kube_node_keypath = os.environ.get(
+                'KUBE_NODE_KEYPATH',
+                None
+            )
+            # Warn user only if Kube env is detected locally
+            if self.env_name == "local":
+                if not os.path.exists(self.kube_config_path):
+                    logger_cli.warn(
+                        "Kube config path not found on local env: '{}'".format(
+                            self.kube_config_path
+                        )
+                    )
+                # On local envs, KUBE_NODE_KEYPATH is mandatory and is
+                # provided to give cfg-checker access to kube nodes
+                if not self.kube_node_keypath and not self.force_no_key:
+                    raise ConfigException(
+                        "Please, supply a key for the cluster nodes. "
+                        "Use KUBE_NODE_KEYPATH, see 'etc/example.env'. "
+                        "Consider checking KUBE_NODE_USER as well"
+                    )
+            else:
+                # Init keys for nodes in case of remote env
+                # KUBE_NODE_KEYPATH is provided in case of nodes key would be
+                # different to master nodes key, which is supplied
+                # using MCP_SSH_KEY (mandatory) and, for the most cases,
+                # should be the same for remote envs
+                if not self.kube_node_keypath and not self.force_no_key:
+                    logger_cli.debug(
+                        "... using MCP_SSH_KEY as node keys. "
+                        "Supply KUBE_NODE_KEYPATH to update."
+                    )
+                    self.kube_node_keypath = self.ssh_key
 
     def _init_env(self, env_name=None):
         """Inits the environment vars from the env file
@@ -69,6 +267,7 @@
         Keyword Arguments:
             env_name {str} -- environment name to search configuration
                 files in etc/<env_name>.env (default: {None})
+            env_type {str} -- environment type to use: salt/kube
 
         Raises:
             ConfigException -- on IO error when loading env file
@@ -76,7 +275,7 @@
         """
         # load env file as init os.environment with its values
         if env_name is None:
-            _env_name = 'local'
+            _env_name = ENV_LOCAL
         else:
             _env_name = env_name
         _config_path = os.path.join(pkg_dir, 'etc', _env_name + '.env')
@@ -94,6 +293,7 @@
                     _config_path
                 )
             )
+        self.vars = []
         for index in range(len(_list)):
             _line = _list[index]
             # skip comments
@@ -101,13 +301,14 @@
                 continue
             # validate
             _errors = []
-            if _line.find('=') < 0 or _line.count('=') > 1:
+            if len(_line) < 1:
+                _errors.append("Line {}: empty".format(index))
+            elif _line.find('=') < 0 or _line.count('=') > 1:
                 _errors.append("Line {}: {}".format(index, _line))
             else:
                 # save values
                 _t = _line.split('=')
-                _key, _value = _t[0], _t[1]
-                os.environ[_key] = _value
+                self.vars.append([_t[0], _t[1]])
         # if there was errors, report them
         if _errors:
             raise ConfigException(
@@ -121,11 +322,15 @@
                     len(_list)
                 )
             )
-            self.salt_env = _env_name
+            self.env_name = _env_name
 
-    def __init__(self):
+    def __init__(self, args):
         """Base configuration class. Only values that are common for all scripts
         """
+        self.ssh_uses_sudo = args.sudo
+        self.kube_config_path = args.kube_config_path
+        self.debug = args.debug
+        self.force_no_key = args.force_no_key
         # Make sure we running on Python 3
         if sys.version_info[0] < 3 and sys.version_info[1] < 5:
             logger_cli.error("# ERROR: Python 3.5+ is required")
@@ -136,9 +341,45 @@
                 sys.version_info[1]
             ))
 
-        _env = os.getenv('SALT_ENV', None)
+        _env = os.getenv('MCP_ENV', None)
+
+        # Init environment variables from file, validate
         self._init_env(_env)
-        self._init_values()
+        # Load Common vars for any type of the env
+        self._init_mcp_values()
+        # Detect env types present
+        self._detect_types()
+        # handle forced env type var
+        _forced_type = os.getenv('MCP_TYPE_FORCE', None)
+        if _forced_type in supported_envs:
+            self.detected_envs.append(_forced_type)
+        elif _forced_type is not None:
+            logger_cli.warn(
+                "Unsupported forced type of '{}'".format(
+                    _forced_type
+                )
+            )
+        # Check if any of the envs detected
+        if len(self.detected_envs) < 1:
+            if _env is None:
+                raise ConfigException("No environment types detected locally")
+            else:
+                raise ConfigException(
+                    "No environment types detected at '{}'".format(
+                                self.mcp_host
+                            )
+                )
+        # Init vars that is specific to detected envs only
+        self._init_env_values()
 
-
-config = CheckerConfiguration()
+        # initialize path to folders
+        if self.env_name == "local":
+            # names and folders
+            self.user = self.pw_user.pw_name
+            self.homepath = self.pw_user.pw_dir
+            self.node_homepath = os.path.join('/home', self.kube_node_user)
+        else:
+            # names and folders in case of remote env
+            self.user = self.ssh_user
+            self.homepath = os.path.join('/home', self.ssh_user)
+            self.node_homepath = self.homepath
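
Env file variables are now grouped by prefix: MCP_* values are exported
for any env, while SALT_*/KUBE_* values are applied only when that env
type is detected; MCP_TYPE_FORCE (read from the process environment, not
the file) can force an extra type. An illustrative etc/example.env
(values are made up; the parser rejects empty lines and lines without
exactly one '='):

    # common, applied for any detected env
    MCP_ENV_HOST=10.0.0.10
    MCP_SSH_USER=ubuntu
    MCP_SSH_KEY=/home/ubuntu/.ssh/id_rsa
    # applied only when the matching env type is detected
    SALT_USER=salt
    KUBE_NODE_USER=ubuntu
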
diff --git a/cfg_checker/common/ssh_utils.py b/cfg_checker/common/ssh_utils.py
new file mode 100644
index 0000000..fdf4c91
--- /dev/null
+++ b/cfg_checker/common/ssh_utils.py
@@ -0,0 +1,403 @@
+import queue
+import subprocess
+import traceback
+import threading
+
+from time import sleep
+from .exception import TimeoutException, CheckerException
+from .other import shell, piped_shell
+from .log import logger, logger_cli
+
+
+# We do not use paramiko here to preserve system level ssh config
+def ssh_shell_p(
+    command,
+    host,
+    username=None,
+    keypath=None,
+    port=None,
+    silent=False,
+    piped=False,
+    use_sudo=False
+):
+    _ssh_cmd = []
+    _ssh_cmd.append("ssh")
+    if silent:
+        _ssh_cmd.append("-q")
+    # Build SSH cmd
+    if keypath:
+        _ssh_cmd.append("-i " + keypath)
+    if port:
+        _ssh_cmd.append("-p " + str(port))
+    if username:
+        _ssh_cmd.append(username+'@'+host)
+    else:
+        _ssh_cmd.append(host)
+
+    if use_sudo:
+        _ssh_cmd.append("sudo")
+
+    _ssh_cmd.append(command)
+
+    _ssh_cmd = " ".join(_ssh_cmd)
+    if not piped:
+        return shell(_ssh_cmd)
+    else:
+        return piped_shell(_ssh_cmd)
+
+
+def scp_p(
+    source,
+    target,
+    port=None,
+    keypath=None,
+    silent=False,
+    piped=False
+):
+    _scp_cmd = []
+    _scp_cmd.append("scp")
+    if port:
+        _scp_cmd.append("-P " + str(port))
+    if silent:
+        _scp_cmd.append("-q")
+    # Build SSH cmd
+    if keypath:
+        _scp_cmd.append("-i " + keypath)
+    _scp_cmd.append(source)
+    _scp_cmd.append(target)
+    _scp_cmd = " ".join(_scp_cmd)
+    if not piped:
+        return shell(_scp_cmd)
+    else:
+        return piped_shell(_scp_cmd)
+
+
+def output_reader(_stdout, outq):
+    for line in iter(_stdout.readline, b''):
+        outq.put(line.decode('utf-8'))
+
+
+# Base class for all SSH related actions
+class SshBase(object):
+    def __init__(
+        self,
+        tgt_host,
+        user=None,
+        keypath=None,
+        port=None,
+        timeout=15,
+        silent=False,
+        piped=False
+    ):
+        self._cmd = ["ssh"]
+        self.timeout = timeout
+        self.port = port if port else 22
+        self.host = tgt_host
+        self.username = user
+        self.keypath = keypath
+        self.silent = silent
+        self.piped = piped
+        self.output = []
+
+        self._options = ["-tt"]
+        if self.keypath:
+            self._options += ["-i", self.keypath]
+        if self.port:
+            self._options += ["-p", str(self.port)]
+        self._extra_options = [
+            "-o", "UserKnownHostsFile=/dev/null",
+            "-o", "StrictHostKeyChecking=no"
+        ]
+
+        self._host_uri = ""
+        if self.username:
+            self._host_uri = self.username + "@" + self.host
+        else:
+            self._host_uri = self.host
+
+    def _connect(self, banner="Welcome"):
+        if not isinstance(banner, str):
+            raise CheckerException(
+                "Invalid SSH banner type: '{}'".format(type(banner))
+            )
+        logger.debug("...connecting")
+        while True:
+            try:
+                line = self.outq.get(block=False)
+                self.output.append(line)
+                if line.startswith(banner):
+                    break
+            except queue.Empty:
+                logger.debug("... {} sec".format(self.timeout))
+                sleep(1)
+                self.timeout -= 1
+                if not self.timeout:
+                    logger.debug(
+                        "...timed out after {} sec".format(str(self.timeout))
+                    )
+                    return False
+        logger.debug("...connected")
+        return True
+
+    def _wait_for_string(self, string):
+        logger.debug("...waiting for '{}'".format(string))
+        while True:
+            try:
+                line = self.outq.get(block=False)
+                line = line.decode() if isinstance(line, bytes) else line
+                self.output.append(line)
+                if not line.startswith(string):
+                    continue
+                else:
+                    break
+            except queue.Empty:
+                logger.debug("... {} sec".format(self.timeout))
+                sleep(1)
+                self.timeout -= 1
+                if not self.timeout:
+                    logger.debug(
+                        "...timed out after {} sec".format(str(self.timeout))
+                    )
+                    return False
+        logger.debug("...found")
+        return True
+
+    def _init_connection(self, cmd):
+        self._proc = subprocess.Popen(
+            cmd,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            universal_newlines=False,
+            bufsize=0
+        )
+        # Create thread safe output getter
+        self.outq = queue.Queue()
+        self._t = threading.Thread(
+            target=output_reader,
+            args=(self._proc.stdout, self.outq)
+        )
+        self._t.start()
+
+        # Wait for the login banner to confirm the connection
+        if not self._connect():
+            raise TimeoutException(
+                "SSH connection to '{}'".format(self.host)
+            )
+
+        self.input = self._proc.stdin
+        self.get_output()
+        logger.debug(
+            "Connected. Banners:\n{}".format(
+                "".join(self.flush_output())
+            )
+        )
+
+    def _end_connection(self):
+        # Kill the ssh process if it is alive
+        if self._proc.poll() is None:
+            self._proc.kill()
+        self.get_output()
+
+        return
+
+    def do(self, cmd, timeout=30, sudo=False, strip_cmd=True):
+        cmd = cmd if isinstance(cmd, bytes) else bytes(cmd.encode('utf-8'))
+        logger.debug("...ssh: '{}'".format(cmd))
+        if sudo:
+            _cmd = b"sudo " + cmd
+        else:
+            _cmd = cmd
+        # run command
+        self.input.write(_cmd + b'\n')
+        # wait for completion
+        self.wait_ready(_cmd, timeout=timeout)
+        self.get_output()
+        _output = self.flush_output().replace('\r', '')
+        if strip_cmd:
+            return "\n".join(_output.splitlines()[1:])
+        else:
+            return _output
+
+    def get_output(self):
+        while True:
+            try:
+                line = self.outq.get(block=False)
+                line = str(line) if isinstance(line, bytes) else line
+                self.output.append(line)
+            except queue.Empty:
+                return self.output
+        return None
+
+    def flush_output(self, as_string=True):
+        _out = self.output
+        self.output = []
+        if as_string:
+            return "".join(_out)
+        else:
+            return _out
+
+    def wait_ready(self, cmd, timeout=60):
+        def _strip_cmd_carrets(_str, carret='\r', skip_chars=1):
+            _cnt = _str.count(carret)
+            while _cnt > 0:
+                _idx = _str.index(carret)
+                _str = _str[:_idx] + _str[_idx+1+skip_chars:]
+                _cnt -= 1
+            return _str
+        while True:
+            try:
+                _line = self.outq.get(block=False)
+                line = _line.decode() if isinstance(_line, bytes) else _line
+                self.output.append(line)
+                # check if this is the command itself and skip
+                if '$' in line:
+                    _cmd = line.split('$', 1)[1].strip()
+                    _cmd = _strip_cmd_carriage_returns(_cmd)
+                    if _cmd == cmd.decode():
+                        continue
+                break
+            except queue.Empty:
+                logger.debug("... {} sec".format(timeout))
+                sleep(1)
+                timeout -= 1
+                if not timeout:
+                    logger.debug("...timed out")
+                    return False
+        return True
+
+    def wait_for_string(self, string, timeout=60):
+        if not self._wait_for_string(string):
+            raise TimeoutException(
+                "Time out waiting for string '{}'".format(string)
+            )
+        else:
+            return True
+
+
+class SshShell(SshBase):
+    def __enter__(self):
+        self._cmd = ["ssh"]
+        self._cmd += self._options
+        self._cmd += self._extra_options
+        self._cmd += [self._host_uri]
+
+        logger.debug("...shell to: '{}'".format(" ".join(self._cmd)))
+        self._init_connection(self._cmd)
+        return self
+
+    def __exit__(self, _type, _value, _traceback):
+        self._end_connection()
+        if _value:
+            logger.warn(
+                "Error running SSH:\r\n{}".format(
+                    "".join(traceback.format_exception(
+                        _type,
+                        _value,
+                        _traceback
+                    ))
+                )
+            )
+
+        return True
+
+    def connect(self):
+        return self.__enter__()
+
+    def kill(self):
+        self._end_connection()
+
+    def get_host_path(self, path):
+        _uri = self.host + ":" + path
+        if self.username:
+            _uri = self.username + "@" + _uri
+        return _uri
+
+    def scp(self, _src, _dst):
+        self._scp_options = []
+        if self.keypath:
+            self._scp_options += ["-i", self.keypath]
+        if self.port:
+            self._scp_options += ["-P", str(self.port)]
+
+        _cmd = ["scp"]
+        _cmd += self._scp_options
+        _cmd += self._extra_options
+        _cmd += [_src]
+        _cmd += [_dst]
+
+        logger.debug("...scp: '{}'".format(" ".join(_cmd)))
+        _proc = subprocess.Popen(
+            _cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+        )
+        _r = _proc.communicate()
+        _e = _r[1].decode() if _r[1] else ""
+        return _proc.returncode, _r[0].decode(), _e
+
+
+class PortForward(SshBase):
+    def __init__(
+        self,
+        host,
+        fwd_host,
+        user=None,
+        keypath=None,
+        port=None,
+        loc_port=10022,
+        fwd_port=22,
+        timeout=15
+    ):
+        super(PortForward, self).__init__(
+            host,
+            user=user,
+            keypath=keypath,
+            port=port,
+            timeout=timeout,
+            silent=True,
+            piped=False
+        )
+        self.f_host = fwd_host
+        self.l_port = loc_port
+        self.f_port = fwd_port
+
+        self._forward_options = [
+            "-L",
+            ":".join([str(self.l_port), self.f_host, str(self.f_port)])
+        ]
+
+    def __enter__(self):
+        self._cmd = ["ssh"]
+        self._cmd += self._forward_options
+        self._cmd += self._options
+        self._cmd += self._extra_options
+        self._cmd += [self._host_uri]
+
+        logger.debug(
+            "...port forwarding: '{}'".format(" ".join(self._cmd))
+        )
+        self._init_connection(self._cmd)
+        return self
+
+    def __exit__(self, _type, _value, _traceback):
+        self._end_connection()
+        if _value:
+            logger_cli.warn(
+                "Error running SSH:\r\n{}".format(
+                    "".join(traceback.format_exception(
+                        _type,
+                        _value,
+                        _traceback
+                    ))
+                )
+            )
+
+        return True
+
+    def connect(self):
+        return self.__enter__()
+
+    def kill(self):
+        self._end_connection()
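
Reviewer note: the two classes above are meant to be used as context managers. A minimal usage sketch follows (not part of the patch); hostnames, key paths, and ports are placeholders, and output_reader is an assumed equivalent of the queue-feeding helper referenced by _init_connection (the real one is defined earlier in this module):

    from cfg_checker.common.ssh_utils import PortForward, SshShell

    # Assumed equivalent of output_reader: drain the ssh process stdout
    # into the thread-safe queue, one line at a time.
    def output_reader(proc_stdout, outq):
        for line in iter(proc_stdout.readline, b''):
            outq.put(line)

    # Plain shell session; __exit__ kills the ssh process even on error.
    with SshShell("ctl01.example.local", user="ubuntu", keypath="~/.ssh/id_rsa") as sh:
        print(sh.do("uname -a", timeout=10))
        _code, _out, _err = sh.scp("ifs_data.py", sh.get_host_path("/tmp/ifs_data.py"))

    # Node reachable only through a jump host: forward local port 10022
    # to <node>:22, then ssh to localhost.
    with PortForward("jump.example.local", "192.168.0.10", user="ubuntu", loc_port=10022):
        with SshShell("localhost", user="ubuntu", port=10022) as sh:
            print(sh.do("hostname"))
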
diff --git a/cfg_checker/helpers/args_utils.py b/cfg_checker/helpers/args_utils.py
index 498ed30..7ac431c 100644
--- a/cfg_checker/helpers/args_utils.py
+++ b/cfg_checker/helpers/args_utils.py
@@ -2,7 +2,8 @@
 import os
 import sys
 
-from cfg_checker.common.exception import ConfigException
+from cfg_checker.common.exception import ConfigException, \
+    CommandTypeNotSupportedException, CheckerException
 
 
 class MyParser(argparse.ArgumentParser):
@@ -70,3 +71,61 @@
             return 'csv', args.csv
     else:
         raise ConfigException("Report type and filename not set")
+
+
+def check_supported_env(target_env, args, config):
+    def _raise_type_not_supported():
+        raise CommandTypeNotSupportedException(
+                "'{}' -> '{}' is not supported on any of "
+                "the currently detected environments: "
+                "{}".format(
+                    args.command,
+                    args.type,
+                    ",".join(config.detected_envs))
+        )
+
+    def _check_target_vs_used(_tenv):
+        if not hasattr(args, 'use_env'):
+            return _tenv
+        elif args.use_env == _tenv or not args.use_env:
+            return _tenv
+        else:
+            raise CommandTypeNotSupportedException(
+                    "'{}' -> '{}' is not supported "
+                    "for selected env of '{}'".format(
+                        args.command,
+                        args.type,
+                        args.use_env
+                        )
+            )
+
+    if isinstance(target_env, list):
+        _set = set(config.detected_envs).intersection(set(target_env))
+        if len(_set) < 1:
+            _raise_type_not_supported()
+        if len(_set) > 1:
+            if not hasattr(args, 'use_env'):
+                raise CheckerException(
+                    "Multiple envs detected: {}, use --use-env option".format(
+                        ",".join(list(_set))
+                    )
+                )
+            elif args.use_env and args.use_env in _set:
+                return args.use_env
+            else:
+                _raise_type_not_supported()
+        else:
+            return _check_target_vs_used(list(_set)[0])
+    elif isinstance(target_env, str):
+        if target_env not in config.detected_envs:
+            _raise_type_not_supported()
+        else:
+            return _check_target_vs_used(target_env)
+    else:
+        raise CheckerException(
+            "Unexpected target env type '{}' in '{}' -> '{}'".format(
+                target_env,
+                args.command,
+                args.type,
+            )
+        )
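
A quick behavioural sketch of check_supported_env (not part of the patch). FakeConfig stands in for the real CheckerConfiguration, and the env name strings are illustrative; the actual values come from the ENV_TYPE_* constants in cfg_checker.common.settings:

    from argparse import Namespace

    class FakeConfig(object):
        detected_envs = ["salt", "kube"]   # hypothetical detection result

    args = Namespace(command="packages", type="report", use_env="kube")

    # Command supports both detected envs: --use-env resolves the ambiguity.
    assert check_supported_env(["salt", "kube"], args, FakeConfig()) == "kube"

    # Command supports only an env that was not detected:
    # raises CommandTypeNotSupportedException.
    check_supported_env("linux", args, FakeConfig())
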
diff --git a/cfg_checker/modules/network/__init__.py b/cfg_checker/modules/network/__init__.py
index 28d08c4..dd8cc98 100644
--- a/cfg_checker/modules/network/__init__.py
+++ b/cfg_checker/modules/network/__init__.py
@@ -1,9 +1,11 @@
 from cfg_checker.common import logger_cli
+from cfg_checker.common.settings import ENV_TYPE_SALT
 from cfg_checker.helpers import args_utils
 from cfg_checker.modules.network import checker, mapper, pinger
 
 
 command_help = "Network infrastructure checks and reports"
+supported_envs = [ENV_TYPE_SALT]
 
 
 def init_parser(_parser):
@@ -66,13 +68,17 @@
     return _parser
 
 
-def do_check(args):
+def do_check(args, config):
     # Net Checks
     # should not print map, etc...
     # Just bare summary and errors
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+    # Start command
     logger_cli.info("# Network check to console")
     _skip, _skip_file = args_utils.get_skip_args(args)
     netChecker = checker.NetworkChecker(
+        config,
         skip_list=_skip,
         skip_list_file=_skip_file
     )
@@ -89,16 +95,17 @@
         netChecker.print_error_details()
 
 
-def do_report(args):
+def do_report(args, config):
     # Network Report
-    # should generate Static HTML page
-    # with node/network map and values
-
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+    # Start command
     logger_cli.info("# Network report (check, node map")
 
     _filename = args_utils.get_arg(args, 'html')
     _skip, _skip_file = args_utils.get_skip_args(args)
     netChecker = checker.NetworkChecker(
+        config,
         skip_list=_skip,
         skip_list_file=_skip_file
     )
@@ -111,12 +118,15 @@
     return
 
 
-def do_map(args):
+def do_map(args, config):
     # Network Map
-    # Should generate network map to console or HTML
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+    # Start command
     logger_cli.info("# Network report")
     _skip, _skip_file = args_utils.get_skip_args(args)
     networkMap = mapper.NetworkMapper(
+        config,
         skip_list=_skip,
         skip_list_file=_skip_file
     )
@@ -127,11 +137,14 @@
     return
 
 
-def do_list(args):
+def do_list(args, config):
     # Network List
-    # Should generate network map to console or HTML
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+    # Start command
     _skip, _skip_file = args_utils.get_skip_args(args)
     _map = mapper.NetworkMapper(
+        config,
         skip_list=_skip,
         skip_list_file=_skip_file
     )
@@ -148,15 +161,19 @@
     return
 
 
-def do_ping(args):
+def do_ping(args, config):
     # Network pinger
     # Checks if selected nodes are pingable
     # with the desired parameters: MTU, frame size, etc.
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+    # Start command
     if not args.cidr:
         logger_cli.error("\n# Use mcp-check network list to get list of CIDRs")
     _cidr = args_utils.get_arg(args, "cidr")
     _skip, _skip_file = args_utils.get_skip_args(args)
     _pinger = pinger.NetworkPinger(
+        config,
         mtu=args.mtu,
         detailed=args.detailed,
         skip_list=_skip,
@@ -182,7 +199,7 @@
         return
 
 
-def do_trace(args):
+def do_trace(args, config):
     # Network packet tracer
     # Check if packet is delivered to proper network host
     logger_cli.info("# Packet Tracer not yet implemented")
diff --git a/cfg_checker/modules/network/checker.py b/cfg_checker/modules/network/checker.py
index c590d13..eaa2428 100644
--- a/cfg_checker/modules/network/checker.py
+++ b/cfg_checker/modules/network/checker.py
@@ -7,13 +7,15 @@
 class NetworkChecker(object):
     def __init__(
         self,
+        config,
         skip_list=None,
         skip_list_file=None
     ):
         logger_cli.debug("... init error logs folder")
         self.errors = NetworkErrors()
         self.mapper = NetworkMapper(
-            self.errors,
+            config,
+            errors_class=self.errors,
             skip_list=skip_list,
             skip_list_file=skip_list_file
         )
@@ -45,15 +47,16 @@
         """
         logger_cli.info("### Generating report to '{}'".format(filename))
         _report = reporter.ReportToFile(
-            reporter.HTMLNetworkReport(),
+            reporter.HTMLNetworkReport(self.mapper.salt_master),
             filename
         )
-        _report({
-            "domain": self.mapper.domain,
-            "nodes": self.mapper.nodes,
-            "map": self.mapper.map,
-            "mcp_release": self.mapper.cluster['mcp_release'],
-            "openstack_release": self.mapper.cluster['openstack_release']
-
-        })
+        _report(
+            {
+                "domain": self.mapper.domain,
+                "nodes": self.mapper.nodes,
+                "map": self.mapper.map,
+                "mcp_release": self.mapper.cluster['mcp_release'],
+                "openstack_release": self.mapper.cluster['openstack_release']
+            }
+        )
         logger_cli.info("-> Done")
diff --git a/cfg_checker/modules/network/mapper.py b/cfg_checker/modules/network/mapper.py
index 51f52bb..fd19864 100644
--- a/cfg_checker/modules/network/mapper.py
+++ b/cfg_checker/modules/network/mapper.py
@@ -5,7 +5,7 @@
 from cfg_checker.common import logger_cli
 from cfg_checker.common.exception import InvalidReturnException
 from cfg_checker.modules.network.network_errors import NetworkErrors
-from cfg_checker.nodes import salt_master
+from cfg_checker.nodes import SaltNodes
 
 # TODO: use templated approach
 # net interface structure should be the same
@@ -33,19 +33,21 @@
 
     def __init__(
         self,
+        config,
         errors_class=None,
         skip_list=None,
         skip_list_file=None
     ):
+        self.salt_master = SaltNodes(config)
         logger_cli.info("# Initializing mapper")
         # init networks and nodes
         self.networks = {}
-        self.nodes = salt_master.get_nodes(
+        self.nodes = self.salt_master.get_nodes(
             skip_list=skip_list,
             skip_list_file=skip_list_file
         )
-        self.cluster = salt_master.get_info()
-        self.domain = salt_master.domain
+        self.cluster = self.salt_master.get_info()
+        self.domain = self.salt_master.domain
         # init and pre-populate interfaces
         self.interfaces = {k: {} for k in self.nodes}
         # Init errors class
@@ -113,13 +115,14 @@
         # class uses nodes from self.nodes dict
         _reclass = {}
         # Get required pillars
-        salt_master.get_specific_pillar_for_nodes("linux:network")
-        for node in salt_master.nodes.keys():
+        self.salt_master.get_specific_pillar_for_nodes("linux:network")
+        for node in self.salt_master.nodes.keys():
             # check if this node
-            if not salt_master.is_node_available(node):
+            if not self.salt_master.is_node_available(node):
                 continue
             # get the reclass value
-            _pillar = salt_master.nodes[node]['pillars']['linux']['network']
+            _pillar = \
+                self.salt_master.nodes[node]['pillars']['linux']['network']
             # we should be ready if there is no interface in reclass for a node
             # for example on APT node
             if 'interface' in _pillar:
@@ -169,14 +172,14 @@
         # class uses nodes from self.nodes dict
         _runtime = {}
         logger_cli.info("# Mapping node runtime network data")
-        salt_master.prepare_script_on_active_nodes("ifs_data.py")
-        _result = salt_master.execute_script_on_active_nodes(
+        self.salt_master.prepare_script_on_active_nodes("ifs_data.py")
+        _result = self.salt_master.execute_script_on_active_nodes(
             "ifs_data.py",
             args=["json"]
         )
-        for key in salt_master.nodes.keys():
+        for key in self.salt_master.nodes.keys():
             # check if we are to work with this node
-            if not salt_master.is_node_available(key):
+            if not self.salt_master.is_node_available(key):
                 continue
             # large per-node payloads from salt_master are processed in order
@@ -191,21 +194,21 @@
                         )
                     )
                 _dict = json.loads(_text[_text.find('{'):])
-                salt_master.nodes[key]['routes'] = _dict.pop("routes")
-                salt_master.nodes[key]['networks'] = _dict
+                self.salt_master.nodes[key]['routes'] = _dict.pop("routes")
+                self.salt_master.nodes[key]['networks'] = _dict
             else:
-                salt_master.nodes[key]['networks'] = {}
-                salt_master.nodes[key]['routes'] = {}
+                self.salt_master.nodes[key]['networks'] = {}
+                self.salt_master.nodes[key]['routes'] = {}
             logger_cli.debug("... {} has {} networks".format(
                 key,
-                len(salt_master.nodes[key]['networks'].keys())
+                len(self.salt_master.nodes[key]['networks'].keys())
             ))
         logger_cli.info("-> done collecting networks data")
 
         logger_cli.info("-> mapping IPs")
         # match interfaces by IP subnets
-        for host, node_data in salt_master.nodes.items():
-            if not salt_master.is_node_available(host):
+        for host, node_data in self.salt_master.nodes.items():
+            if not self.salt_master.is_node_available(host):
                 continue
 
             for net_name, net_data in node_data['networks'].items():
@@ -460,7 +463,7 @@
             for hostname in names:
                 _notes = []
                 node = hostname.split('.')[0]
-                if not salt_master.is_node_available(hostname, log=False):
+                if not self.salt_master.is_node_available(hostname, log=False):
                     logger_cli.info(
                         "    {0:8} {1}".format(node, "node not available")
                     )
@@ -513,7 +516,7 @@
                 # get gate and routes if proto is static
                 if _proto == 'static':
                     # get the gateway for current net
-                    _routes = salt_master.nodes[hostname]['routes']
+                    _routes = self.salt_master.nodes[hostname]['routes']
                     _route = _routes[_net] if _net in _routes else None
                     # get the default gateway
                     if 'default' in _routes:
diff --git a/cfg_checker/modules/network/pinger.py b/cfg_checker/modules/network/pinger.py
index 5b12a94..17f8597 100644
--- a/cfg_checker/modules/network/pinger.py
+++ b/cfg_checker/modules/network/pinger.py
@@ -5,22 +5,24 @@
 from cfg_checker.helpers.console_utils import Progress
 from cfg_checker.modules.network.mapper import NetworkMapper
 from cfg_checker.modules.network.network_errors import NetworkErrors
-from cfg_checker.nodes import salt_master
+from cfg_checker.nodes import SaltNodes
 
 
 # This is an independent class that takes salt nodes as input
 class NetworkPinger(object):
     def __init__(
         self,
+        config,
         mtu=None,
         detailed=False,
         errors_class=None,
         skip_list=None,
         skip_list_file=None
     ):
-        logger_cli.info("# Initializing")
+        logger_cli.info("# Initializing Pinger")
+        self.salt_master = SaltNodes(config)
         # all active nodes in the cloud
-        self.target_nodes = salt_master.get_nodes(
+        self.target_nodes = self.salt_master.get_nodes(
             skip_list=skip_list,
             skip_list_file=skip_list_file
         )
@@ -117,7 +119,7 @@
 
         # do ping of packets
         logger_cli.info("# Pinging nodes: MTU={}".format(self.target_mtu))
-        salt_master.prepare_script_on_active_nodes("ping.py")
+        self.salt_master.prepare_script_on_active_nodes("ping.py")
         _progress = Progress(_count)
         _progress_index = 0
         _node_index = 0
@@ -125,13 +127,13 @@
             _targets = src_data["targets"]
             _node_index += 1
             # create 'targets.json' on source host
-            _path = salt_master.prepare_json_on_node(
+            _path = self.salt_master.prepare_json_on_node(
                 src,
                 _targets,
                 "targets.json"
             )
             # execute ping.py
-            _results = salt_master.execute_script_on_node(
+            _results = self.salt_master.execute_script_on_node(
                 src,
                 "ping.py",
                 args=[_path]
diff --git a/cfg_checker/modules/packages/__init__.py b/cfg_checker/modules/packages/__init__.py
index 2d0cc79..e482eec 100644
--- a/cfg_checker/modules/packages/__init__.py
+++ b/cfg_checker/modules/packages/__init__.py
@@ -1,9 +1,12 @@
+from cfg_checker.common.settings import ENV_TYPE_SALT, \
+    ENV_TYPE_KUBE, ENV_TYPE_LINUX
 from cfg_checker.helpers import args_utils
 from cfg_checker.modules.packages.repos import RepoManager
 
 from . import checker
 
 command_help = "Package versions check (Candidate vs Installed)"
+supported_envs = [ENV_TYPE_SALT, ENV_TYPE_KUBE, ENV_TYPE_LINUX]
 
 
 def init_parser(_parser):
@@ -98,12 +101,19 @@
     return _parser
 
 
-def do_report(args):
+def do_report(args, config):
     """Create package versions report, HTML
 
     :args: - parser arguments
     :return: - no return value
     """
+    # Check if there is supported env found
+    _env = args_utils.check_supported_env(
+        [ENV_TYPE_SALT, ENV_TYPE_KUBE],
+        args,
+        config
+    )
+    # Start command
     _type, _filename = args_utils.get_package_report_type_and_filename(args)
 
     if ' ' in args.exclude_keywords:
@@ -113,12 +123,23 @@
 
     # init connection to salt and collect minion data
     _skip, _skip_file = args_utils.get_skip_args(args)
-    pChecker = checker.CloudPackageChecker(
-        force_tag=args.force_tag,
-        exclude_keywords=_kw,
-        skip_list=_skip,
-        skip_list_file=_skip_file
-    )
+    if _env == ENV_TYPE_SALT:
+        pChecker = checker.SaltCloudPackageChecker(
+            config,
+            force_tag=args.force_tag,
+            exclude_keywords=_kw,
+            skip_list=_skip,
+            skip_list_file=_skip_file
+        )
+    elif _env == ENV_TYPE_KUBE:
+        pChecker = checker.KubeCloudPackageChecker(
+            config,
+            force_tag=args.force_tag,
+            exclude_keywords=_kw,
+            skip_list=_skip,
+            skip_list_file=_skip_file
+        )
+
     # collect data on installed packages
     pChecker.collect_installed_packages()
     # diff installed and candidates
@@ -127,12 +148,14 @@
     pChecker.create_report(_filename, rtype=_type, full=args.full)
 
 
-def do_versions(args):
+def do_versions(args, config):
     """Builds tagged repo structure and parses Packages.gz files
 
     :args: - parser arguments
     :return: - no return value
     """
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
     # Get the list of tags for the url
     r = RepoManager()
     if args.list_tags:
@@ -159,9 +182,11 @@
         r.parse_repos()
 
 
-def do_show(args):
+def do_show(args, config):
     """Shows package (or multiple) history across parsed tags
     """
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
     # Init manager
     r = RepoManager()
     # show packages
@@ -169,9 +194,11 @@
         r.show_package(p)
 
 
-def do_show_app(args):
+def do_show_app(args, config):
     """Shows packages for app
     """
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
     # Init manager
     r = RepoManager()
     # show packages
diff --git a/cfg_checker/modules/packages/checker.py b/cfg_checker/modules/packages/checker.py
index fb02db2..b2e8194 100644
--- a/cfg_checker/modules/packages/checker.py
+++ b/cfg_checker/modules/packages/checker.py
@@ -3,9 +3,10 @@
 from cfg_checker.common import const, logger_cli
 from cfg_checker.common.exception import ConfigException
 from cfg_checker.common.other import merge_dict
+from cfg_checker.common.settings import ENV_TYPE_SALT
 from cfg_checker.helpers.console_utils import Progress
 from cfg_checker.modules.packages.repos import RepoManager
-from cfg_checker.nodes import salt_master
+from cfg_checker.nodes import SaltNodes, KubeNodes
 from cfg_checker.reports import reporter
 
 from .versions import DebianVersion, PkgVersions, VersionCmpResult
@@ -14,34 +15,36 @@
 class CloudPackageChecker(object):
     def __init__(
         self,
+        config,
         force_tag=None,
         exclude_keywords=[],
         skip_list=None,
         skip_list_file=None
     ):
+        # check that this env tag is present in Manager
+        self.env_config = config
+        self.rm = RepoManager()
+        self.force_tag = force_tag
+        self.exclude_keywords = exclude_keywords
+
         # Init salt master info
-        if not salt_master.nodes:
-            salt_master.nodes = salt_master.get_nodes(
+        if not self.master.nodes:
+            self.master.nodes = self.master.get_nodes(
                 skip_list=skip_list,
                 skip_list_file=skip_list_file
             )
 
-        # check that this env tag is present in Manager
-        self.rm = RepoManager()
-        _tags = self.rm.get_available_tags(tag=salt_master.mcp_release)
+        _tags = self.rm.get_available_tags(tag=self.master.mcp_release)
         if not _tags:
             logger_cli.warning(
-                "\n# hWARNING: '{0}' is not listed in repo index. "
+                "\n# WARNING: '{0}' is not listed in repo index. "
                 "Consider running:\n\t{1}\nto add info on this tag's "
                 "release package versions".format(
-                    salt_master.mcp_release,
-                    "mcp-checker packages versions --tag {0}"
+                    self.master.mcp_release,
+                    "mcp-checker packages versions --tag <target_tag>"
                 )
             )
 
-        self.force_tag = force_tag
-        self.exclude_keywords = exclude_keywords
-
     @staticmethod
     def presort_packages(all_packages, full=None):
         logger_cli.info("-> Presorting packages")
@@ -189,41 +192,6 @@
 
         return _data
 
-    def collect_installed_packages(self):
-        """
-        Collect installed packages on each node
-        sets 'installed' dict property in the class
-
-        :return: none
-        """
-        logger_cli.info("# Collecting installed packages")
-        salt_master.prepare_script_on_active_nodes("pkg_versions.py")
-        _result = salt_master.execute_script_on_active_nodes("pkg_versions.py")
-
-        for key in salt_master.nodes.keys():
-            # due to much data to be passed from salt, it is happening in order
-            if key in _result and _result[key]:
-                _text = _result[key]
-                try:
-                    _dict = json.loads(_text[_text.find('{'):])
-                except ValueError:
-                    logger_cli.info("... no JSON for '{}'".format(
-                        key
-                    ))
-                    logger_cli.debug(
-                        "ERROR:\n{}\n".format(_text[:_text.find('{')])
-                    )
-                    _dict = {}
-
-                salt_master.nodes[key]['packages'] = _dict
-            else:
-                salt_master.nodes[key]['packages'] = {}
-            logger_cli.debug("... {} has {} packages installed".format(
-                key,
-                len(salt_master.nodes[key]['packages'].keys())
-            ))
-        logger_cli.info("-> Done")
-
     def collect_packages(self):
         """
         Check package versions in repos vs installed
@@ -231,13 +199,13 @@
         :return: no return values, all date put to dict in place
         """
         # Preload OpenStack release versions
-        _desc = PkgVersions()
+        _desc = PkgVersions(self.env_config)
         logger_cli.info(
             "# Cross-comparing: Installed vs Candidates vs Release"
         )
         # shortcuts for this cloud values
-        _os = salt_master.openstack_release
-        _mcp = salt_master.mcp_release
+        _os = self.master.openstack_release
+        _mcp = self.master.mcp_release
         _t = [self.force_tag] if self.force_tag else []
         _t.append(_mcp)
 
@@ -250,12 +218,12 @@
         )
 
         # Progress class
-        _progress = Progress(len(salt_master.nodes.keys()))
+        _progress = Progress(len(self.master.nodes.keys()))
         _progress_index = 0
         _total_processed = 0
         # Collect packages from all of the nodes in flat dict
         _all_packages = {}
-        for node_name, node_value in salt_master.nodes.items():
+        for node_name, node_value in self.master.nodes.items():
             _uniq_len = len(_all_packages.keys())
             _progress_index += 1
             # progress updates shown before next node only
@@ -277,8 +245,9 @@
                 # at a first sight
                 if _name not in _all_packages:
                     # get node attributes
-                    _linux = salt_master.nodes[node_name]['linux_codename']
-                    _arch = salt_master.nodes[node_name]['linux_arch']
+                    _linux = \
+                        self.master.nodes[node_name]['linux_codename']
+                    _arch = self.master.nodes[node_name]['linux_arch']
                     # get versions for tag, Openstack release and repo headers
                     # excluding 'nightly' repos by default
                     _r = {}
@@ -415,9 +384,9 @@
         """
         logger_cli.info("# Generating report to '{}'".format(filename))
         if rtype == 'html':
-            _type = reporter.HTMLPackageCandidates()
+            _type = reporter.HTMLPackageCandidates(self.master)
         elif rtype == 'csv':
-            _type = reporter.CSVAllPackages()
+            _type = reporter.CSVAllPackages(self.master)
         else:
             raise ConfigException("Report type not set")
         _report = reporter.ReportToFile(
@@ -425,10 +394,86 @@
             filename
         )
         payload = {
-            "nodes": salt_master.nodes,
-            "mcp_release": salt_master.mcp_release,
-            "openstack_release": salt_master.openstack_release
+            "nodes": self.master.nodes,
+            "mcp_release": self.master.mcp_release,
+            "openstack_release": self.master.openstack_release
         }
         payload.update(self.presort_packages(self._packages, full))
         _report(payload)
         logger_cli.info("-> Done")
+
+    def collect_installed_packages(self):
+        """
+        Collect installed packages on each node
+        sets 'installed' dict property in the class
+
+        :return: none
+        """
+        logger_cli.info("# Collecting installed packages")
+        if self.master.env_type == ENV_TYPE_SALT:
+            self.master.prepare_script_on_active_nodes("pkg_versions.py")
+        _result = self.master.execute_script_on_active_nodes(
+            "pkg_versions.py"
+        )
+
+        for key in self.master.nodes.keys():
+            # large per-node payloads from salt are processed in order
+            if key in _result and _result[key]:
+                _text = _result[key]
+                try:
+                    _dict = json.loads(_text[_text.find('{'):])
+                except ValueError:
+                    logger_cli.info("... no JSON for '{}'".format(
+                        key
+                    ))
+                    logger_cli.debug(
+                        "ERROR:\n{}\n".format(_text[:_text.find('{')])
+                    )
+                    _dict = {}
+
+                self.master.nodes[key]['packages'] = _dict
+            else:
+                self.master.nodes[key]['packages'] = {}
+            logger_cli.debug("... {} has {} packages installed".format(
+                key,
+                len(self.master.nodes[key]['packages'].keys())
+            ))
+        logger_cli.info("-> Done")
+
+
+class SaltCloudPackageChecker(CloudPackageChecker):
+    def __init__(
+        self,
+        config,
+        force_tag=None,
+        exclude_keywords=[],
+        skip_list=None,
+        skip_list_file=None
+    ):
+        self.master = SaltNodes(config)
+        super(SaltCloudPackageChecker, self).__init__(
+            config,
+            force_tag=force_tag,
+            exclude_keywords=exclude_keywords,
+            skip_list=skip_list,
+            skip_list_file=skip_list_file
+        )
+
+
+class KubeCloudPackageChecker(CloudPackageChecker):
+    def __init__(
+        self,
+        config,
+        force_tag=None,
+        exclude_keywords=[],
+        skip_list=None,
+        skip_list_file=None
+    ):
+        self.master = KubeNodes(config)
+        super(KubeCloudPackageChecker, self).__init__(
+            config,
+            force_tag=force_tag,
+            exclude_keywords=exclude_keywords,
+            skip_list=skip_list,
+            skip_list_file=skip_list_file
+        )
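
The split above keeps all comparison logic in CloudPackageChecker; subclasses only bind self.master to the right Nodes implementation before delegating, since the shared base __init__ reads self.master. A minimal driver sketch (not part of the patch; the tag value is illustrative):

    pChecker = SaltCloudPackageChecker(config, force_tag="2019.2.0")
    pChecker.collect_installed_packages()
    pChecker.collect_packages()
    pChecker.create_report("report.html", rtype="html", full=False)
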
diff --git a/cfg_checker/modules/packages/repos.py b/cfg_checker/modules/packages/repos.py
index 57d8b9e..15129e7 100644
--- a/cfg_checker/modules/packages/repos.py
+++ b/cfg_checker/modules/packages/repos.py
@@ -376,6 +376,7 @@
     init_done = False
 
     def _init_folders(self, arch_folder=None):
+        logger_cli.info("# Loading package versions data")
         # overide arch folder if needed
         if arch_folder:
             self._arch_folder = arch_folder
diff --git a/cfg_checker/modules/packages/versions.py b/cfg_checker/modules/packages/versions.py
index 542c0e4..05eaad8 100644
--- a/cfg_checker/modules/packages/versions.py
+++ b/cfg_checker/modules/packages/versions.py
@@ -2,7 +2,7 @@
 import os
 import re
 
-from cfg_checker.common import config, const, logger_cli
+from cfg_checker.common import const, logger_cli
 from cfg_checker.common.settings import pkg_dir
 
 
@@ -17,7 +17,7 @@
         "versions": {}
     }
 
-    def __init__(self):
+    def __init__(self, config):
         # preload csv file
         logger_cli.info("# Preloading specific MCP release versions")
         with open(os.path.join(
diff --git a/cfg_checker/modules/reclass/__init__.py b/cfg_checker/modules/reclass/__init__.py
index 88b287e..8d498c3 100644
--- a/cfg_checker/modules/reclass/__init__.py
+++ b/cfg_checker/modules/reclass/__init__.py
@@ -1,6 +1,7 @@
 import os
 
 from cfg_checker.common import logger_cli
+from cfg_checker.common.settings import ENV_TYPE_LINUX
 from cfg_checker.helpers import args_utils
 from cfg_checker.reports import reporter
 
@@ -9,6 +10,7 @@
 from . import validator
 
 command_help = "Reclass related checks and reports"
+supported_envs = [ENV_TYPE_LINUX]
 
 
 def init_parser(_parser):
@@ -48,7 +50,10 @@
     return _parser
 
 
-def do_list(args):
+def do_list(args, config):
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
+    # Start command
     logger_cli.info("# Reclass list")
     _arg_path = args_utils.get_arg(args, 'models_path')
     logger_cli.info("-> Current path is: {}".format(_arg_path))
@@ -78,7 +83,10 @@
     return
 
 
-def do_diff(args):
+def do_diff(args, config):
+    # Check if there is supported env found
+    args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
+    # Start command
     logger_cli.info("# Reclass comparer (HTML report)")
     _filename = args_utils.get_arg(args, 'html')
     # checking folder params
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index e028be2..e568cec 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -1,38 +1,132 @@
 import json
 import os
 from copy import deepcopy
+from multiprocessing.dummy import Pool
 
-from cfg_checker.clients import get_salt_remote, salt
-from cfg_checker.common import config
-from cfg_checker.common.const import all_roles_map
+from cfg_checker.clients import get_salt_remote, get_kube_remote
+from cfg_checker.common.const import all_salt_roles_map, all_kube_roles_map
 from cfg_checker.common.const import NODE_UP, NODE_DOWN, NODE_SKIP
+from cfg_checker.common.const import ubuntu_versions, nova_openstack_versions
 from cfg_checker.common import logger, logger_cli
 from cfg_checker.common import utils
-from cfg_checker.common.exception import SaltException
-from cfg_checker.common.settings import pkg_dir
+from cfg_checker.common.file_utils import create_temp_file_with_content
+from cfg_checker.common.exception import SaltException, KubeException
+from cfg_checker.common.ssh_utils import PortForward, SshShell
+from cfg_checker.common.settings import pkg_dir, ENV_TYPE_KUBE, ENV_TYPE_SALT
+from cfg_checker.helpers.console_utils import Progress
+
 
 node_tmpl = {
     'role': '',
     'node_group': '',
     'status': NODE_DOWN,
     'pillars': {},
-    'grains': {}
+    'grains': {},
+    'raw': {}
 }
 
 
-class SaltNodes(object):
-    def __init__(self):
+def _prepare_skipped_nodes(_names, skip_list, skip_list_file):
+    _skipped_minions = []
+    # skip list file
+    if skip_list_file:
+        _valid, _invalid = utils.get_nodes_list(skip_list_file)
+        if _invalid:
+            logger_cli.info(
+                "\n# WARNING: Detected invalid entries "
+                "in nodes skip list: {}\n".format(
+                    "\n".join(_invalid)
+                )
+            )
+        _skipped_minions.extend(_valid)
+    # process wildcard, create node list out of mask
+    if skip_list:
+        _list = []
+        _invalid = []
+        for _item in skip_list:
+            if '*' in _item:
+                _str = _item[:_item.index('*')]
+                _nodes = [_m for _m in _names if _m.startswith(_str)]
+                if not _nodes:
+                    logger_cli.warn(
+                        "# WARNING: No nodes found for {}".format(_item)
+                    )
+                _list.extend(_nodes)
+            else:
+                if _item in _names:
+                    _list.append(_item)
+                else:
+                    logger_cli.warn(
+                        "# WARNING: No node found for {}".format(_item)
+                    )
+        # removing duplicates
+        _list = list(set(_list))
+        _skipped_minions.extend(_list)
+
+    return _skipped_minions
+
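
For clarity, what the helper above yields for a typical node set (an illustrative sketch; node names are placeholders):

    names = ["cmp001.local", "cmp002.local", "ctl01.local", "apt01.local"]
    # 'cmp*' expands by prefix; exact names pass through; unknown names only warn
    skipped = _prepare_skipped_nodes(names, ["cmp*", "apt01.local"], None)
    # -> ["cmp001.local", "cmp002.local", "apt01.local"] (order may vary)
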
+
+class Nodes(object):
+    def __init__(self, config):
+        self.nodes = None
+        self.env_config = config
+
+    def skip_node(self, node):
+        # Add node to skip list,
+        # for example if it fails to comply with the rules
+
+        # check if we know such node
+        if node in self.nodes.keys() and node not in self.skip_list:
+            # yes, add it
+            self.skip_list.append(node)
+            return True
+        else:
+            return False
+
+    def get_nodes(self, skip_list=None, skip_list_file=None):
+        if not self.nodes:
+            if not skip_list and self.env_config.skip_nodes:
+                self.gather_node_info(
+                    self.env_config.skip_nodes,
+                    skip_list_file
+                )
+            else:
+                self.gather_node_info(skip_list, skip_list_file)
+        return self.nodes
+
+    def get_info(self):
+        _info = {
+           'mcp_release': self.mcp_release,
+           'openstack_release': self.openstack_release
+        }
+        return _info
+
+    def is_node_available(self, node, log=True):
+        if node in self.skip_list:
+            if log:
+                logger_cli.info("-> node '{}' not active".format(node))
+            return False
+        elif node in self.not_responded:
+            if log:
+                logger_cli.info("-> node '{}' not responded".format(node))
+            return False
+        else:
+            return True
+
+
+class SaltNodes(Nodes):
+    def __init__(self, config):
+        super(SaltNodes, self).__init__(config)
         logger_cli.info("# Gathering environment information")
         # simple salt rest client
-        self.salt = salt
-        self.nodes = None
+        self.salt = None
+        self.env_type = ENV_TYPE_SALT
 
     def gather_node_info(self, skip_list, skip_list_file):
         # Keys for all nodes
         # this does not work on salt 2016.8.3, so we override it with a list
         logger_cli.debug("... collecting node names existing in the cloud")
         if not self.salt:
-            self.salt = get_salt_remote(config)
+            self.salt = get_salt_remote(self.env_config)
 
         try:
             _keys = self.salt.list_keys()
@@ -56,7 +150,7 @@
             )
         elif not self.node_keys:
             # this is the last resort
-            _minions = config.load_nodes_list()
+            _minions = self.env_config.load_nodes_list()
             logger_cli.info(
                 "-> {} nodes loaded from list file".format(len(_minions))
             )
@@ -64,40 +158,8 @@
             _minions = self.node_keys['minions']
 
         # Skip nodes if needed
-        _skipped_minions = []
-        # skip list file
-        if skip_list_file:
-            _valid, _invalid = utils.get_nodes_list(skip_list_file)
-            logger_cli.info(
-                "\n# WARNING: Detected invalid entries "
-                "in nodes skip list: {}\n".format(
-                    "\n".join(_invalid)
-                )
-            )
-            _skipped_minions.extend(_valid)
-        # process wildcard, create node list out of mask
-        if skip_list:
-            _list = []
-            _invalid = []
-            for _item in skip_list:
-                if '*' in _item:
-                    _str = _item[:_item.index('*')]
-                    _nodes = [_m for _m in _minions if _m.startswith(_str)]
-                    if not _nodes:
-                        logger_cli.warn(
-                            "# WARNING: No nodes found for {}".format(_item)
-                        )
-                    _list.extend(_nodes)
-                else:
-                    if _item in _minions:
-                        _list += _item
-                    else:
-                        logger_cli.warn(
-                            "# WARNING: No node found for {}".format(_item)
-                        )
-            # removing duplicates
-            _list = list(set(_list))
-            _skipped_minions.extend(_list)
+        _skipped_minions = \
+            _prepare_skipped_nodes(_minions, skip_list, skip_list_file)
 
         # in case API not listed minions, we need all that answer ping
         _active = self.salt.get_active_nodes()
@@ -108,7 +170,7 @@
         _domains = set()
         for _name in _minions:
             _nc = utils.get_node_code(_name)
-            _rmap = all_roles_map
+            _rmap = all_salt_roles_map
             _role = _rmap[_nc] if _nc in _rmap else 'unknown'
             if _name in _skipped_minions:
                 _status = NODE_SKIP
@@ -151,7 +213,7 @@
         #     lambda nd: self.nodes[nd]['role'] == const.all_roles_map['cfg'],
         #     self.nodes
         # )
-        _role = all_roles_map['cfg']
+        _role = all_salt_roles_map['cfg']
         _filtered = [n for n, v in self.nodes.items() if v['role'] == _role]
         if len(_filtered) < 1:
             raise SaltException(
@@ -180,33 +242,6 @@
                 _n['linux_codename'] = _p['linux_system_codename']
                 _n['linux_arch'] = _p['linux_system_architecture']
 
-    def skip_node(self, node):
-        # Add node to skip list
-        # Fro example if it is fails to comply with the rules
-
-        # check if we know such node
-        if node in self.nodes.keys() and node not in self.skip_list:
-            # yes, add it
-            self.skip_list.append(node)
-            return True
-        else:
-            return False
-
-    def get_nodes(self, skip_list=None, skip_list_file=None):
-        if not self.nodes:
-            if not skip_list and config.skip_nodes:
-                self.gather_node_info(config.skip_nodes, skip_list_file)
-            else:
-                self.gather_node_info(skip_list, skip_list_file)
-        return self.nodes
-
-    def get_info(self):
-        _info = {
-           'mcp_release': self.mcp_release,
-           'openstack_release': self.openstack_release
-        }
-        return _info
-
     def get_cmd_for_nodes(self, cmd, target_key, target_dict=None, nodes=None):
         """Function runs. cmd.run and parses result into place
         or into dict structure provided
@@ -243,7 +278,7 @@
                 logger_cli.debug(
                     "... '{}' not responded after '{}'".format(
                         node,
-                        config.salt_timeout
+                        self.env_config.salt_timeout
                     )
                 )
                 data[target_key] = None
@@ -283,7 +318,7 @@
                 logger_cli.debug(
                     "... '{}' not responded after '{}'".format(
                         node,
-                        config.salt_timeout
+                        self.env_config.salt_timeout
                     )
                 )
                 _data[_pillar_keys[-1]] = None
@@ -295,7 +330,7 @@
         # this function assumes that all folders are created
         _dumps = json.dumps(_dict, indent=2).splitlines()
         _storage_path = os.path.join(
-            config.salt_file_root, config.salt_scripts_folder
+            self.env_config.salt_file_root, self.env_config.salt_scripts_folder
         )
         logger_cli.debug(
             "... uploading data as '{}' "
@@ -307,12 +342,12 @@
         _cache_path = os.path.join(_storage_path, filename)
         _source_path = os.path.join(
             'salt://',
-            config.salt_scripts_folder,
+            self.env_config.salt_scripts_folder,
             filename
         )
         _target_path = os.path.join(
             '/root',
-            config.salt_scripts_folder,
+            self.env_config.salt_scripts_folder,
             filename
         )
 
@@ -334,7 +369,7 @@
         with open(_p, 'rt') as fd:
             _script = fd.read().splitlines()
         _storage_path = os.path.join(
-            config.salt_file_root, config.salt_scripts_folder
+            self.env_config.salt_file_root, self.env_config.salt_scripts_folder
         )
         logger_cli.debug(
             "... uploading script {} "
@@ -348,12 +383,12 @@
         _cache_path = os.path.join(_storage_path, script_filename)
         _source_path = os.path.join(
             'salt://',
-            config.salt_scripts_folder,
+            self.env_config.salt_scripts_folder,
             script_filename
         )
         _target_path = os.path.join(
             '/root',
-            config.salt_scripts_folder,
+            self.env_config.salt_scripts_folder,
             script_filename
         )
 
@@ -370,7 +405,7 @@
             self.active_nodes_compound,
             os.path.join(
                 '/root',
-                config.salt_scripts_folder
+                self.env_config.salt_scripts_folder
             ),
             tgt_type="compound"
         )
@@ -388,7 +423,7 @@
         # Prepare path
         _target_path = os.path.join(
             '/root',
-            config.salt_scripts_folder,
+            self.env_config.salt_scripts_folder,
             script_filename
         )
 
@@ -412,7 +447,7 @@
         # Prepare path
         _target_path = os.path.join(
             '/root',
-            config.salt_scripts_folder,
+            self.env_config.salt_scripts_folder,
             script_filename
         )
 
@@ -446,17 +481,410 @@
         self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
         return _r
 
-    def is_node_available(self, node, log=True):
-        if node in self.skip_list:
-            if log:
-                logger_cli.info("-> node '{}' not active".format(node))
-            return False
-        elif node in self.not_responded:
-            if log:
-                logger_cli.info("-> node '{}' not responded".format(node))
-            return False
+
+class KubeNodes(Nodes):
+    def __init__(self, config):
+        super(KubeNodes, self).__init__(config)
+        logger_cli.info("# Gathering environment information")
+        # kube remote client
+        self.kube = get_kube_remote(self.env_config)
+        self.env_type = ENV_TYPE_KUBE
+
+    def gather_node_info(self, skip_list, skip_list_file):
+        # Gather nodes info and query pod lists for each node
+        logger_cli.debug("... collecting node names existing in the cloud")
+
+        # Gather node names and info
+        _nodes = self.kube.get_node_info()
+        _node_names = list(_nodes.keys())
+        # Skip nodes if needed
+        _skipped_nodes = \
+            _prepare_skipped_nodes(_node_names, skip_list, skip_list_file)
+
+        # Count how many nodes active
+        self._active = [n for n, v in _nodes.items()
+                        if v['conditions']['ready']['status']]
+
+        # iterate through all accepted nodes and create a dict for it
+        self.nodes = {}
+        self.skip_list = []
+        # _domains = set()
+        for _name in _node_names:
+            if _name in _skipped_nodes:
+                _status = NODE_SKIP
+                self.skip_list.append(_name)
+            else:
+                _status = NODE_UP if _name in self._active else NODE_DOWN
+                if _status == NODE_DOWN:
+                    self.skip_list.append(_name)
+                    logger_cli.info(
+                        "-> '{}' shows 'Ready' as 'False', "
+                        "added to skip list".format(
+                            _name
+                        )
+                    )
+            _roles = {}
+            _labels = {}
+            for _label, _value in _nodes[_name]['labels'].items():
+                if _label in all_kube_roles_map:
+                    _roles[all_kube_roles_map[_label]] = _value
+                else:
+                    _labels[_label] = _value
+
+            self.nodes[_name] = deepcopy(node_tmpl)
+            self.nodes[_name].pop("grains")
+            self.nodes[_name].pop("pillars")
+
+            # hostname
+            self.nodes[_name]['shortname'] = \
+                _nodes[_name]['addresses']['hostname']['address']
+            self.nodes[_name]['internalip'] = \
+                _nodes[_name]['addresses']['internalip']['address']
+            # _domains.add(_name.split(".", 1)[1])
+            self.nodes[_name]['node_group'] = None
+            self.nodes[_name]['labels'] = _labels
+            self.nodes[_name]['roles'] = _roles
+            self.nodes[_name]['status'] = _status
+            # Backward compatibility
+            _info = _nodes[_name]['status']['node_info']
+            self.nodes[_name]['linux_image'] = _info['os_image']
+            self.nodes[_name]['linux_arch'] = _info['architecture']
+
+            _codename = "unknown"
+            _n, _v, _c = _info['os_image'].split()
+            if _n.lower() == 'ubuntu':
+                _v, _, _ = _v.rpartition('.') if '.' in _v else (_v, "", "")
+                if _v in ubuntu_versions:
+                    _codename = ubuntu_versions[_v].split()[0].lower()
+            self.nodes[_name]['linux_codename'] = _codename
+
+            # Consider per-data type transfer
+            self.nodes[_name]["raw"] = _nodes[_name]
+        # TODO: Investigate how to handle domains in Kube, probably - skip
+        # _domains = list(_domains)
+        # if len(_domains) > 1:
+        #     logger_cli.warning(
+        #         "Multiple domains detected: {}".format(",".join(_domains))
+        #     )
+        # else:
+        #     self.domain = _domains[0]
+        logger_cli.info(
+            "-> {} nodes collected: {} - active, {} - not active".format(
+                len(self.nodes),
+                len(self._active),
+                len(self.skip_list)
+            )
+        )
+
+        _role = "k8s-master"
+        _filtered = [n for n, v in self.nodes.items() if _role in v['roles']]
+        if len(_filtered) < 1:
+            raise KubeException(
+                "No k8s-master nodes detected! Check/Update node role map."
+            )
         else:
-            return True
+            _r = [n for n, v in self.nodes.items()
+                  if v['status'] != NODE_UP and _role in v['roles']]
+            if len(_r) > 0:
+                logger_cli.warn(
+                    "Master nodes are reporting 'NotReady:\n{}".format(
+                        "\n".join(_r)
+                    )
+                )
+            self.kube.master_node = _filtered[0]
 
+        # get specific data upfront
+        # OpenStack versions
+        self.mcp_release = ""
+        # Quick and Dirty way to detect OS release
+        _nova_version = self.kube.exec_on_target_pod(
+            "nova-manage --version",
+            "nova-api-osapi",
+            "openstack"
+        )
+        _nmajor = _nova_version.partition('.')[0]
+        self.openstack_release = nova_openstack_versions[_nmajor]
 
-salt_master = SaltNodes()
+        return
+
+    @staticmethod
+    def _get_ssh_shell(_h, _u, _k, _p, _q, _pipe):
+        _ssh = SshShell(
+            _h,
+            user=_u,
+            keypath=_k,
+            port=_p,
+            silent=_q,
+            piped=_pipe
+        )
+        return _ssh.connect()
+
+    @staticmethod
+    def _do_ssh_cmd(_cmd, _h, _u, _k, _p, _q, _pipe):
+        with SshShell(
+            _h,
+            user=_u,
+            keypath=_k,
+            port=_p,
+            silent=_q,
+            piped=_pipe
+        ) as ssh:
+            _r = ssh.do(_cmd)
+            logger_cli.debug("'{}'".format(_r))
+        return _r
+
+    def node_shell(
+        self,
+        node,
+        silent=True,
+        piped=True,
+        use_sudo=True,
+        fport=None
+    ):
+        _u = self.env_config.kube_node_user
+        _k = self.env_config.kube_node_keypath
+        _h = self.nodes[node]['internalip']
+        _p = 22
+        if self.kube.is_local:
+            return None, self._get_ssh_shell(_h, _u, _k, _p, silent, piped)
+        else:
+            _fh = "localhost"
+            _p = 10022 if not fport else fport
+            _pfwd = PortForward(
+                self.env_config.ssh_host,
+                _h,
+                user=_u,
+                keypath=self.env_config.ssh_key,
+                loc_port=_p
+            )
+            _pfwd.connect()
+            _ssh = self._get_ssh_shell(_fh, _u, _k, _p, silent, piped)
+            return _pfwd, _ssh
+
+    def execute_script_on_node(self, node, script_filename, args=[]):
+        # Prepare path
+        _target_path = os.path.join(
+            self.env_config.node_homepath,
+            self.env_config.kube_scripts_folder,
+            script_filename
+        )
+
+        # execute script
+        logger_cli.debug("... running script on '{}'".format(node))
+        # handle results for each node
+        _script_arguments = " ".join(args) if args else ""
+        self.not_responded = []
+        # get a shell to the node and run the script through it
+        _fwd_sh, _sh = self.node_shell(node)
+        _nr = _sh.do(
+            "python {} {}".format(
+                _target_path,
+                _script_arguments
+            )
+        ) if _sh else None
+        if _fwd_sh:
+            _fwd_sh.kill()
+        if _sh:
+            _sh.kill()
+
+        if not _nr:
+            self.not_responded.append(node)
+            return {}
+        else:
+            return {node: _nr}
+
+    def execute_cmd_on_active_nodes(self, cmd, nodes=None):
+        # execute cmd on each active node
+        logger_cli.debug("...running '{}' on active nodes".format(cmd))
+        # handle results for each node
+        self.not_responded = []
+        _r = {}
+        # TODO: Use threading and pool
+        for node in self._active:
+            _fwd_sh, _sh = self.node_shell(node)
+            _nr = _sh.do(cmd) if _sh else None
+            if _fwd_sh:
+                _fwd_sh.kill()
+            if _sh:
+                _sh.kill()
+
+            if not _nr:
+                self.not_responded.append(node)
+            else:
+                _r[node] = _nr
+
+        return _r
+
+    def _exec_script(self, params):
+        """
+        Threadsafe method to get shell to node,
+        check/copy script and get results
+        [
+            node_name,
+            src_path,
+            tgt_path,
+            conf,
+            args
+        ]
+        """
+        _name = params[0]
+        _src = params[1]
+        _tgt = params[2]
+        _conf = params[3]
+        _args = params[4]
+        _port = params[5]
+        _log_name = "["+_name+"]:"
+        _check = "echo $(if [[ -s '{}' ]]; then echo True; " \
+                 "else echo False; fi)"
+        _fwd_sh, _sh = self.node_shell(
+            _name,
+            use_sudo=False,
+            fport=_port
+        )
+        # check python3
+        _python = _sh.do("which python3")
+        _python = utils.to_bool(
+            _sh.do(_check.format(_python))
+        )
+        if not _python:
+            _sh.do("apt install python3", sudo=True)
+        # build path to the scripts folder on the node
+        _folder = os.path.join(
+            self.env_config.node_homepath,
+            _conf.kube_scripts_folder
+        )
+        # check if folder exists
+        _folder_exists = utils.to_bool(
+            _sh.do(_check.format(_folder))
+        )
+        if not _folder_exists:
+            _sh.do("mkdir " + _folder)
+        logger.info("{} Syncing file".format(_log_name))
+        _code, _r, _e = _sh.scp(
+            _src,
+            _sh.get_host_path(_tgt),
+        )
+        # handle error code
+        if _code:
+            logger_cli.warn(
+                "{} Error in scp:\n"
+                "\tstdout:'{}'\n"
+                "\tstderr:'{}'".format(_log_name, _r, _e)
+            )
+
+        # execute script
+        logger.debug("{} Running script".format(_log_name))
+        _out = _sh.do(
+            "python3 {}{}".format(
+                _tgt,
+                _args
+            ),
+            sudo=True
+        )
+
+        if _fwd_sh:
+            _fwd_sh.kill()
+        _sh.kill()
+
+        return [_name, _out]
+
+    def execute_script_on_active_nodes(self, script_filename, args=None):
+        # Prepare script
+        _source_path = os.path.join(pkg_dir, 'scripts', script_filename)
+        _target_path = os.path.join(
+            self.env_config.node_homepath,
+            self.env_config.kube_scripts_folder,
+            script_filename
+        )
+        # handle results for each node
+        _script_arguments = " ".join(args) if args else ""
+        if _script_arguments:
+            _script_arguments = " " + _script_arguments
+        self.not_responded = []
+        _results = {}
+        logger_cli.debug(
+            "...running '{}' on active nodes, {} worker threads".format(
+                script_filename,
+                self.env_config.threads
+            )
+        )
+        # Workers pool
+        pool = Pool(self.env_config.threads)
+
+        # init the parameters
+        # node_name,
+        # src_path,
+        # tgt_path,
+        # conf,
+        # args,
+        # fwd_port
+        _params = []
+        _port = 10022
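+        # base local forwarding port; incremented per node so that
+        # parallel workers do not collide on the same local port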
+        for node in self._active:
+            # build parameter blocks
+            _p_list = [
+                node,
+                _source_path,
+                _target_path,
+                self.env_config,
+                _script_arguments,
+                _port
+            ]
+            _params.append(_p_list)
+            _port += 1
+
+        _progress = Progress(len(_params))
+        results = pool.imap_unordered(self._exec_script, _params)
+
+        for _idx, (_name, _out) in enumerate(results, start=1):
+            if not _out:
+                self.not_responded.append(_name)
+            else:
+                _results[_name] = _out
+            _progress.write_progress(_idx)
+
+        _progress.end()
+        pool.close()
+        pool.join()
+
+        # return results; nodes that gave no output are in self.not_responded
+        return _results
+
+    def prepare_json_on_node(self, node, _dict, filename):
+        # ensure the scripts folder exists and copy serialized json to it
+        _dumps = json.dumps(_dict, indent=2).splitlines()
+
+        _source_path = create_temp_file_with_content(_dumps)
+        _target_path = os.path.join(
+            self.env_config.node_homepath,
+            self.env_config.kube_scripts_folder,
+            filename
+        )
+        _folder = os.path.join(
+            self.env_config.node_homepath,
+            self.env_config.kube_scripts_folder
+        )
+        _check = "echo $(if [[ -s '{}' ]]; then echo True; " \
+                 "else echo False; fi)"
+        _fwd_sh, _sh = self.node_shell(
+            node,
+            use_sudo=False
+        )
+
+        # check if folder exists
+        _folder_exists = utils.to_bool(
+            _sh.do(_check.format(_folder))
+        )
+        if not _folder_exists:
+            _sh.do("mkdir " + _folder)
+        logger_cli.debug(
+            "...create data on node '{}':'{}'".format(node, _target_path)
+        )
+        _code, _r, _e = _sh.scp(
+            _source_path,
+            _sh.get_host_path(_target_path),
+        )
+        # handle error code
+        if _code:
+            logger_cli.warn(
+                "Error in scp:\n"
+                "\tstdout:'{}'\n"
+                "\tstderr:'{}'".format(_r, _e)
+            )
+
+        if _fwd_sh:
+            _fwd_sh.kill()
+        _sh.kill()
+        return _target_path
diff --git a/cfg_checker/reports/reporter.py b/cfg_checker/reports/reporter.py
index e0a746e..d103cb4 100644
--- a/cfg_checker/reports/reporter.py
+++ b/cfg_checker/reports/reporter.py
@@ -6,7 +6,6 @@
 from cfg_checker.common import const
 from cfg_checker.common import logger_cli
 from cfg_checker.common.file_utils import read_file_as_lines
-from cfg_checker.nodes import salt_master
 
 import jinja2
 
@@ -119,8 +118,9 @@
 
 @six.add_metaclass(abc.ABCMeta)
 class _Base(object):
-    def __init__(self):
+    def __init__(self, salt_master=None):
         self.jinja2_env = self.init_jinja2_env()
+        self.salt_master = salt_master
 
     @abc.abstractmethod
     def __call__(self, payload):
@@ -230,7 +230,7 @@
         def _dmidecode(_dict, type=0):
             # _key = "dmi"
             _key_r = "dmi_r"
-            _f_cmd = salt_master.get_cmd_for_nodes
+            _f_cmd = self.salt_master.get_cmd_for_nodes
             _cmd = "dmidecode -t {}".format(type)
             _f_cmd(_cmd, _key_r, target_dict=_dict)
             # TODO: parse BIOS output or given type
@@ -239,7 +239,7 @@
         def _lsblk(_dict):
             # _key = "lsblk"
             _key_r = "lsblk_raw"
-            _f_cmd = salt_master.get_cmd_for_nodes
+            _f_cmd = self.salt_master.get_cmd_for_nodes
             _columns = [
                 "NAME",
                 "HCTL",
@@ -260,7 +260,7 @@
             _key = "lscpu"
             _key_r = "lscpu_raw"
             # get all of the values
-            _f_cmd = salt_master.get_cmd_for_nodes
+            _f_cmd = self.salt_master.get_cmd_for_nodes
             _cmd = "lscpu | sed -n '/\\:/s/ \\+/ /gp'"
             _f_cmd(_cmd, _key_r, target_dict=_dict)
             # parse them and put into dict
@@ -290,7 +290,7 @@
         def _free(_dict):
             _key = "ram"
             _key_r = "ram_raw"
-            _f_cmd = salt_master.get_cmd_for_nodes
+            _f_cmd = self.salt_master.get_cmd_for_nodes
             _cmd = "free -h | sed -n '/Mem/s/ \\+/ /gp'"
             _f_cmd(_cmd, _key_r, target_dict=_dict)
             # parse them and put into dict
@@ -323,7 +323,7 @@
         def _services(_dict):
             _key = "services"
             _key_r = "services_raw"
-            _f_cmd = salt_master.get_cmd_for_nodes
+            _f_cmd = self.salt_master.get_cmd_for_nodes
             _cmd = "service --status-all"
             _f_cmd(_cmd, _key_r, target_dict=_dict)
             for node, dt in _dict.items():
@@ -350,7 +350,7 @@
         def _vcp_status(_dict):
             _key = "virsh"
             _key_r = "virsh_raw"
-            salt_master.get_cmd_for_nodes(
+            self.salt_master.get_cmd_for_nodes(
                 "virsh list --all | sed -n -e '/[0-9]/s/ \\+/ /gp'",
                 _key_r,
                 target_dict=_dict,
@@ -383,7 +383,7 @@
         def _soft_net_stats(_dict):
             _key = "net_stats"
             _key_r = "net_stats_raw"
-            _f_cmd = salt_master.get_cmd_for_nodes
+            _f_cmd = self.salt_master.get_cmd_for_nodes
             _cmd = "cat /proc/net/softnet_stat; echo \\#; " \
                 "sleep {}; cat /proc/net/softnet_stat".format(
                     _softnet_interval
@@ -465,7 +465,7 @@
         }
 
         # get kernel version
-        salt_master.get_cmd_for_nodes(
+        self.salt_master.get_cmd_for_nodes(
             "uname -r",
             "kernel",
             target_dict=data["nodes"]
@@ -481,7 +481,7 @@
         # sample: /dev/vda1 78G 33G 45G 43%
         _key = "disk"
         _key_r = "disk_raw"
-        salt_master.get_cmd_for_nodes(
+        self.salt_master.get_cmd_for_nodes(
             "df -h | sed -n '/^\\/dev/s/ \\+/ /gp' | cut -d\" \" -f 1-5",
             "disk_raw",
             target_dict=data["nodes"]
diff --git a/etc/example._env b/etc/example._env
index 1a8c472..80e4a72 100644
--- a/etc/example._env
+++ b/etc/example._env
@@ -6,18 +6,22 @@
 # Or run it from within a cloud
 
 # Main log file of the tests
-LOGFILE = 'cfg_checker.log'
+MCP_LOGFILE = 'cfg_checker.log'
 
 # SSH is used when environment is not local to get salt password
 # in order set options and user for the ssh, 
 # please, use ~/.ssh/config
 # SSH hostname of salt master node
-SSH_KEY=<ssh_keyfile_path>
-SSH_USER=<ssh_user>
-SSH_HOST=<ssh_hostname>
+MCP_SSH_KEY=<ssh_keyfile_path>
+MCP_SSH_USER=<ssh_user>
+MCP_SSH_HOST=<ssh_hostname>
+
+
+### Salt type section
+# All vars should start with SALT
 
 # Salt master host to connect to
-SALT_URL=127.0.0.1
+MCP_ENV_HOST=127.0.0.1
 
 # Salt master port.
 # Note that you can safely execute second master (not on production)
@@ -38,3 +42,7 @@
 
 # All nodes list file overide
 SALT_NODE_LIST_FILE=etc/example._list
+
+### K8s section
+# All vars start with KUBE
+KUBE_CONFIG_ROOT=/root
\ No newline at end of file
diff --git a/etc/local.env b/etc/local.env
index 4fab668..bf55206 100644
--- a/etc/local.env
+++ b/etc/local.env
@@ -2,15 +2,18 @@
 # Local Environment file
 
 # Main log file of the tests
-LOGFILE = '/var/log/salt-cfg-checker.log'
+MCP_LOGFILE = '/var/log/salt-cfg-checker.log'
 
 # Salt master host to connect to
-SALT_URL=127.0.0.1
-
+MCP_ENV_HOST=127.0.0.1
 # Salt master port.
 # Note that you can safely execute second master (not on production)
 # on the same cfg node with diferent port and set logging to ALL
-SALT_PORT=6969
+MCP_SALT_PORT=6969
+
+
+### Salt type section
+# All vars should start with SALT
 
 # User and password to auth on master. Can be found in /srv/salt/...
 # No need for a password if environment is local
@@ -29,3 +32,10 @@
 # Can be set to 'auto' for automatic collection
 # or check the example in 'etc/example._list'
 SALT_NODE_LIST_FILE=auto
+
+### K8s section
+# All vars start with KUBE
+KUBE_CONFIG_ROOT=/root
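+# Node-side folder and ssh credentials used for remote script execution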
+KUBE_SCRIPTS_FOLDER=cfg-checker-scripts
+KUBE_NODE_USER=ubuntu
+KUBE_NODE_KEYPATH=/home/ubuntu/node_key.rsa
\ No newline at end of file
diff --git a/etc/qa-lab.env b/etc/qa-lab.env
index 259528b..3d785ef 100644
--- a/etc/qa-lab.env
+++ b/etc/qa-lab.env
@@ -6,23 +6,22 @@
 # Or run it from within a cloud
 
 # Main log file of the tests
-LOGFILE = 'qa-lab.log'
+MCP_LOGFILE = 'qa-lab.log'
 
 # SSH is used when environment is not local to get salt password
 # in order set options and user for the ssh, 
 # please, use ~/.ssh/config
 # SSH hostname of salt master node
-SSH_KEY=/Users/savex/.ssh/id_rsa
-SSH_USER=osavatieiev
-SSH_HOST=172.16.242.122
+MCP_SSH_KEY=/Users/savex/.ssh/id_rsa
+MCP_SSH_USER=osavatieiev
+MCP_SSH_HOST=172.16.242.122
 
 # Salt master host to connect to
-SALT_URL=172.16.242.122
-
+MCP_ENV_HOST=172.16.242.122
 # Salt master port.
 # Note that you can safely execute second master (not on production)
 # on the same cfg node with diferent port and set logging to ALL
-SALT_PORT=6969
+MCP_SALT_PORT=6969
 
 # User and password to auth on master. Can be found in /srv/salt/...
 SALT_USER=salt
diff --git a/requirements.txt b/requirements.txt
index fe8d8d4..27030e0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,4 +3,5 @@
 jinja2
 requests
 ipaddress
-configparser
\ No newline at end of file
+configparser
+kubernetes
diff --git a/scripts/pkg_versions.py b/scripts/pkg_versions.py
index 66a46aa..ae76833 100644
--- a/scripts/pkg_versions.py
+++ b/scripts/pkg_versions.py
@@ -7,7 +7,8 @@
 def shell(command):
     _ps = subprocess.Popen(
         command.split(),
-        stdout=subprocess.PIPE
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE
     ).communicate()[0].decode()
 
     return _ps
@@ -50,4 +51,7 @@
     pkgs[_pkg]['raw'] = res[3]
 
 buff = json.dumps(pkgs)
-sys.stdout.write(buff)
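+# 'stdout' argument writes raw json without a trailing newline;
+# the default print() adds one for interactive use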
+if len(sys.argv) > 1 and sys.argv[1] == 'stdout':
+    sys.stdout.write(buff)
+else:
+    print(buff)
diff --git a/tests/mocks.py b/tests/mocks.py
index 863def5..9d9c534 100644
--- a/tests/mocks.py
+++ b/tests/mocks.py
@@ -6,6 +6,7 @@
 
 # Prepare fake filenames and files
 _res_dir = os.path.join(tests_dir, 'res')
+_fake_kube_config_path = os.path.join(_res_dir, "_fake_kube_config.yaml")
 
 
 # preload file from res
diff --git a/tests/res/_fake_kube_config.yaml b/tests/res/_fake_kube_config.yaml
new file mode 100644
index 0000000..5311671
--- /dev/null
+++ b/tests/res/_fake_kube_config.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: U3VwZXIgc2VjcmV0IGZha2UgY2VydGlmaWNhdGUgYXV0aG9yaXR5IQo=
+    server: https://192.168.0.1:6443
+  name: ucp_192.168.0.1:6443_admin
+contexts:
+- context:
+    cluster: ucp_192.168.0.1:6443_admin
+    user: ucp_192.168.0.1:6443_admin
+  name: ucp_192.168.0.1:6443_admin
+current-context: ucp_192.168.0.1:6443_admin
+kind: Config
+preferences: {}
+users:
+- name: ucp_192.168.0.1:6443_admin
+  user:
+    client-certificate-data: U3VwZXIgc2VjcmV0IGZha2UgY2VydGlmaWNhdGUhCg==
+    client-key-data: U3VwZXIgc2VjcmV0IGZha2Uga2V5IQo=
\ No newline at end of file
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 74928e3..9d1bd32 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,9 +1,22 @@
+import os
+
 from unittest import mock
 
+from tests.mocks import _fake_kube_config_path
 from tests.test_base import CfgCheckerTestBase
 
 
+os.environ['MCP_TYPE_FORCE'] = 'SALT'
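+# NB: also forced per-test in setUp() below; setting it at module level
+# covers any env type detection that may run at import time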
+
+
 class TestCliCommands(CfgCheckerTestBase):
+    def setUp(self):
+        # force env type to salt
+        os.environ['MCP_TYPE_FORCE'] = 'SALT'
+
+    def tearDown(self):
+        del os.environ['MCP_TYPE_FORCE']
+
     def test_do_cli_main_command(self):
         _module_name = 'cfg_checker.cfg_check'
         _m = self._try_import(_module_name)
@@ -104,8 +117,13 @@
         _m = self._try_import(_module_name)
         _fake_args = mock.MagicMock(name="FakeArgsClass")
         _command = "unknowncommand"
+        _config = None
         with self.redirect_output():
-            _r_value = _m.cli.command.execute_command(_fake_args, _command)
+            _r_value = _m.cli.command.execute_command(
+                _fake_args,
+                _command,
+                _config
+            )
 
         self.assertEqual(
             _r_value,
@@ -118,8 +136,9 @@
         _m = self._try_import(_module_name)
         _type = {}
         _command = "unknowncommand"
+        _config = None
         with self.redirect_output():
-            _r_value = _m.cli.command.execute_command(_type, _command)
+            _r_value = _m.cli.command.execute_command(_type, _command, _config)
 
         self.assertEqual(
             _r_value,
@@ -131,9 +150,18 @@
         _module_name = 'cfg_checker.cli.command'
         _m = self._try_import(_module_name)
         _fake_args = mock.MagicMock(name="FakeArgsClass")
+        _fake_args.kube_config_path = _fake_kube_config_path
         _command = "reclass"
+
+        from cfg_checker.common.settings import CheckerConfiguration
+        _config = CheckerConfiguration(_fake_args)
+
         with self.redirect_output():
-            _r_value = _m.cli.command.execute_command(_fake_args, _command)
+            _r_value = _m.cli.command.execute_command(
+                _fake_args,
+                _command,
+                _config
+            )
 
         self.assertEqual(
             _r_value,
diff --git a/tests/test_common.py b/tests/test_common.py
index 65159a1..42f2988 100644
--- a/tests/test_common.py
+++ b/tests/test_common.py
@@ -30,6 +30,13 @@
 
 
 class TestCommonModules(CfgCheckerTestBase):
+    def setUp(self):
+        # force env type to salt
+        os.environ['MCP_TYPE_FORCE'] = 'SALT'
+
+    def tearDown(self):
+        del os.environ['MCP_TYPE_FORCE']
+
     def test_exceptions(self):
         _m = self._try_import("cfg_checker.common.exception")
         # Get all classes from the exceptions module
diff --git a/tests/test_entrypoints.py b/tests/test_entrypoints.py
index 6cbee3d..7c420d5 100644
--- a/tests/test_entrypoints.py
+++ b/tests/test_entrypoints.py
@@ -1,7 +1,16 @@
+import os
+
 from tests.test_base import CfgCheckerTestBase
 
 
 class TestEntrypoints(CfgCheckerTestBase):
+    def setUp(self):
+        # force env type to salt
+        os.environ['MCP_TYPE_FORCE'] = 'SALT'
+
+    def tearDown(self):
+        del os.environ['MCP_TYPE_FORCE']
+
     def test_entry_mcp_checker(self):
         _module_name = 'cfg_checker.cfg_check'
         with self.redirect_output():
diff --git a/tests/test_network.py b/tests/test_network.py
index 2dc07b3..3a1867b 100644
--- a/tests/test_network.py
+++ b/tests/test_network.py
@@ -1,4 +1,5 @@
 import os
+import shutil
 
 from unittest.mock import patch
 
@@ -19,6 +20,21 @@
 
 
 class TestNetworkModule(CfgCheckerTestBase):
+    def setUp(self):
+        # force env type to salt
+        os.environ['MCP_TYPE_FORCE'] = 'SALT'
+
+    def tearDown(self):
+        del os.environ['MCP_TYPE_FORCE']
+
+    @classmethod
+    def tearDownClass(cls):
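+        # clean up the 'fakeerrors' folder if the test run created it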
+        _ferr = os.path.join(_res_dir, "fakeerrors")
+        if os.path.exists(_ferr):
+            shutil.rmtree(_ferr)
+
+        return super().tearDownClass()
+
     @patch('requests.get', side_effect=mocked_salt_get)
     @patch('requests.post', side_effect=mocked_salt_post)
     @patch(_shell_salt_path, side_effect=mocked_shell)
diff --git a/tests/test_packages.py b/tests/test_packages.py
index 52ec23a..6d8990d 100644
--- a/tests/test_packages.py
+++ b/tests/test_packages.py
@@ -23,6 +23,13 @@
 
 
 class TestPackageModule(CfgCheckerTestBase):
+    def setUp(self):
+        # force env type to salt
+        os.environ['MCP_TYPE_FORCE'] = 'SALT'
+
+    def tearDown(self):
+        del os.environ['MCP_TYPE_FORCE']
+
     @patch('requests.get', side_effect=mocked_package_get)
     @patch(_ReposInfo_path, new=_fakeReposInfo)
     @patch(_RepoManager_path, new=_fakeRepoManager)
diff --git a/tests/test_reclass_comparer.py b/tests/test_reclass_comparer.py
index 07ff9a6..a1a94b3 100644
--- a/tests/test_reclass_comparer.py
+++ b/tests/test_reclass_comparer.py
@@ -5,6 +5,13 @@
 
 
 class TestReclassModule(CfgCheckerTestBase):
+    def setUp(self):
+        # force env type to salt
+        os.environ['MCP_TYPE_FORCE'] = 'SALT'
+
+    def tearDown(self):
+        del os.environ['MCP_TYPE_FORCE']
+
     def test_reclass_list(self):
         _models_dir = os.path.join(_res_dir, "models")
         _args = ["list", "-p", _models_dir]