Multi-env support and Kube client integration
Kube-friendly Beta
Package versions check supports the Kube env
Added:
- Env type detection
- New option: --use-env for selecting the env
  when a function supports multiple detected envs
  (see the env-check sketch after this list)
- Updated config loading
- Each module and command type has a supported-env check
  and stops execution when run on an unsupported env
- Functions can support multiple envs
- Kubernetes dependency
- Kubernetes API detection: local and remote
- Package checking class hierarchy for using Salt or Kube
- Remote pod execution routine
- Flexible SSH/SSH Forwarder classes: 'with' context support, ssh.do(), etc.
- Multithreaded SSH script execution
- Number-of-workers parameter, default 5 (see the thread-pool sketch after this list)
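
The env gating added below follows one pattern in every module: the module
declares supported_envs and each do_* entry point calls
args_utils.check_supported_env(...) before doing any work. A minimal sketch
of that pattern, assuming the cfg_checker package layout from this change;
the do_example command and its branch bodies are illustrative only:

    # Minimal sketch of the per-module env gating pattern.
    # ENV_TYPE_* and args_utils.check_supported_env are names used in the
    # diff below; do_example is a hypothetical command.
    from cfg_checker.common.settings import ENV_TYPE_SALT, ENV_TYPE_KUBE
    from cfg_checker.helpers import args_utils

    command_help = "Example module"
    # envs this module's commands may run against
    supported_envs = [ENV_TYPE_SALT, ENV_TYPE_KUBE]


    def do_example(args, config):
        # Stops execution on an unsupported env; when several detected envs
        # are supported, --use-env selects one and the chosen env is returned
        _env = args_utils.check_supported_env(
            [ENV_TYPE_SALT, ENV_TYPE_KUBE],
            args,
            config
        )
        if _env == ENV_TYPE_SALT:
            pass  # Salt-backed implementation goes here
        elif _env == ENV_TYPE_KUBE:
            pass  # Kube-backed implementation goes here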
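
The multithreaded SSH execution and the workers parameter are not visible in
the hunks below; the following is only a rough, hypothetical illustration of
the idea (the function name and the thread-pool choice are assumptions, not
the code added by this change):

    # Hypothetical sketch: fan a per-node script run out over a bounded
    # thread pool (default 5 workers). run_one is expected to open the SSH
    # session, execute the script and return its output.
    from concurrent.futures import ThreadPoolExecutor, as_completed


    def run_on_nodes(nodes, run_one, workers=5):
        results, errors = {}, {}
        with ThreadPoolExecutor(max_workers=workers) as pool:
            futures = {pool.submit(run_one, node): node for node in nodes}
            for future in as_completed(futures):
                node = futures[future]
                try:
                    results[node] = future.result()
                except Exception as err:
                    # collect per-node failures instead of aborting the run
                    errors[node] = err
        return results, errors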
Fixed:
- Config dependency
- Command loading with supported envs list
- Unit test structure and execution flow updated
- Unit test fixes
- Debug mode handling
- Unified command type/support routine
- Nested attrs getter/setter (sketched after this list)
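
The nested attrs helper itself is also not part of the hunks below; a
hypothetical sketch of what such a getter/setter typically looks like
(names and the example path are illustrative):

    # Hypothetical sketch of a dotted-path attribute getter/setter.
    from functools import reduce


    def get_nested(obj, path, default=None):
        # e.g. get_nested(cfg, "kube.config_path") -> cfg.kube.config_path
        try:
            return reduce(getattr, path.split("."), obj)
        except AttributeError:
            return default


    def set_nested(obj, path, value):
        # resolve every parent attribute, then set the leaf attribute
        *parents, leaf = path.split(".")
        setattr(reduce(getattr, parents, obj), leaf, value)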
Change-Id: I3ade693ac21536e2b5dcee4b24d511749dc72759
Related-PROD: PROD-35811
diff --git a/cfg_checker/modules/network/__init__.py b/cfg_checker/modules/network/__init__.py
index 28d08c4..dd8cc98 100644
--- a/cfg_checker/modules/network/__init__.py
+++ b/cfg_checker/modules/network/__init__.py
@@ -1,9 +1,11 @@
from cfg_checker.common import logger_cli
+from cfg_checker.common.settings import ENV_TYPE_SALT
from cfg_checker.helpers import args_utils
from cfg_checker.modules.network import checker, mapper, pinger
command_help = "Network infrastructure checks and reports"
+supported_envs = [ENV_TYPE_SALT]
def init_parser(_parser):
@@ -66,13 +68,17 @@
return _parser
-def do_check(args):
+def do_check(args, config):
# Net Checks
# should not print map, etc...
# Just bare summary and errors
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+ # Start command
logger_cli.info("# Network check to console")
_skip, _skip_file = args_utils.get_skip_args(args)
netChecker = checker.NetworkChecker(
+ config,
skip_list=_skip,
skip_list_file=_skip_file
)
@@ -89,16 +95,17 @@
netChecker.print_error_details()
-def do_report(args):
+def do_report(args, config):
# Network Report
- # should generate Static HTML page
- # with node/network map and values
-
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+ # Start command
logger_cli.info("# Network report (check, node map")
_filename = args_utils.get_arg(args, 'html')
_skip, _skip_file = args_utils.get_skip_args(args)
netChecker = checker.NetworkChecker(
+ config,
skip_list=_skip,
skip_list_file=_skip_file
)
@@ -111,12 +118,15 @@
return
-def do_map(args):
+def do_map(args, config):
# Network Map
- # Should generate network map to console or HTML
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+ # Start command
logger_cli.info("# Network report")
_skip, _skip_file = args_utils.get_skip_args(args)
networkMap = mapper.NetworkMapper(
+ config,
skip_list=_skip,
skip_list_file=_skip_file
)
@@ -127,11 +137,14 @@
return
-def do_list(args):
+def do_list(args, config):
# Network List
- # Should generate network map to console or HTML
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+ # Start command
_skip, _skip_file = args_utils.get_skip_args(args)
_map = mapper.NetworkMapper(
+ config,
skip_list=_skip,
skip_list_file=_skip_file
)
@@ -148,15 +161,19 @@
return
-def do_ping(args):
+def do_ping(args, config):
# Network pinger
# Checks if selected nodes are pingable
# with a desireble parameters: MTU, Frame, etc
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_SALT, args, config)
+ # Start command
if not args.cidr:
logger_cli.error("\n# Use mcp-check network list to get list of CIDRs")
_cidr = args_utils.get_arg(args, "cidr")
_skip, _skip_file = args_utils.get_skip_args(args)
_pinger = pinger.NetworkPinger(
+ config,
mtu=args.mtu,
detailed=args.detailed,
skip_list=_skip,
@@ -182,7 +199,7 @@
return
-def do_trace(args):
+def do_trace(args, config):
# Network packet tracer
# Check if packet is delivered to proper network host
logger_cli.info("# Packet Tracer not yet implemented")
diff --git a/cfg_checker/modules/network/checker.py b/cfg_checker/modules/network/checker.py
index c590d13..eaa2428 100644
--- a/cfg_checker/modules/network/checker.py
+++ b/cfg_checker/modules/network/checker.py
@@ -7,13 +7,15 @@
class NetworkChecker(object):
def __init__(
self,
+ config,
skip_list=None,
skip_list_file=None
):
logger_cli.debug("... init error logs folder")
self.errors = NetworkErrors()
self.mapper = NetworkMapper(
- self.errors,
+ config,
+ errors_class=self.errors,
skip_list=skip_list,
skip_list_file=skip_list_file
)
@@ -45,15 +47,16 @@
"""
logger_cli.info("### Generating report to '{}'".format(filename))
_report = reporter.ReportToFile(
- reporter.HTMLNetworkReport(),
+ reporter.HTMLNetworkReport(self.mapper.salt_master),
filename
)
- _report({
- "domain": self.mapper.domain,
- "nodes": self.mapper.nodes,
- "map": self.mapper.map,
- "mcp_release": self.mapper.cluster['mcp_release'],
- "openstack_release": self.mapper.cluster['openstack_release']
-
- })
+ _report(
+ {
+ "domain": self.mapper.domain,
+ "nodes": self.mapper.nodes,
+ "map": self.mapper.map,
+ "mcp_release": self.mapper.cluster['mcp_release'],
+ "openstack_release": self.mapper.cluster['openstack_release']
+ }
+ )
logger_cli.info("-> Done")
diff --git a/cfg_checker/modules/network/mapper.py b/cfg_checker/modules/network/mapper.py
index 51f52bb..fd19864 100644
--- a/cfg_checker/modules/network/mapper.py
+++ b/cfg_checker/modules/network/mapper.py
@@ -5,7 +5,7 @@
from cfg_checker.common import logger_cli
from cfg_checker.common.exception import InvalidReturnException
from cfg_checker.modules.network.network_errors import NetworkErrors
-from cfg_checker.nodes import salt_master
+from cfg_checker.nodes import SaltNodes
# TODO: use templated approach
# net interface structure should be the same
@@ -33,19 +33,21 @@
def __init__(
self,
+ config,
errors_class=None,
skip_list=None,
skip_list_file=None
):
+ self.salt_master = SaltNodes(config)
logger_cli.info("# Initializing mapper")
# init networks and nodes
self.networks = {}
- self.nodes = salt_master.get_nodes(
+ self.nodes = self.salt_master.get_nodes(
skip_list=skip_list,
skip_list_file=skip_list_file
)
- self.cluster = salt_master.get_info()
- self.domain = salt_master.domain
+ self.cluster = self.salt_master.get_info()
+ self.domain = self.salt_master.domain
# init and pre-populate interfaces
self.interfaces = {k: {} for k in self.nodes}
# Init errors class
@@ -113,13 +115,14 @@
# class uses nodes from self.nodes dict
_reclass = {}
# Get required pillars
- salt_master.get_specific_pillar_for_nodes("linux:network")
- for node in salt_master.nodes.keys():
+ self.salt_master.get_specific_pillar_for_nodes("linux:network")
+ for node in self.salt_master.nodes.keys():
# check if this node
- if not salt_master.is_node_available(node):
+ if not self.salt_master.is_node_available(node):
continue
# get the reclass value
- _pillar = salt_master.nodes[node]['pillars']['linux']['network']
+ _pillar = \
+ self.salt_master.nodes[node]['pillars']['linux']['network']
# we should be ready if there is no interface in reclass for a node
# for example on APT node
if 'interface' in _pillar:
@@ -169,14 +172,14 @@
# class uses nodes from self.nodes dict
_runtime = {}
logger_cli.info("# Mapping node runtime network data")
- salt_master.prepare_script_on_active_nodes("ifs_data.py")
- _result = salt_master.execute_script_on_active_nodes(
+ self.salt_master.prepare_script_on_active_nodes("ifs_data.py")
+ _result = self.salt_master.execute_script_on_active_nodes(
"ifs_data.py",
args=["json"]
)
- for key in salt_master.nodes.keys():
+ for key in self.salt_master.nodes.keys():
# check if we are to work with this node
- if not salt_master.is_node_available(key):
+ if not self.salt_master.is_node_available(key):
continue
# due to much data to be passed from salt_master,
# it is happening in order
@@ -191,21 +194,21 @@
)
)
_dict = json.loads(_text[_text.find('{'):])
- salt_master.nodes[key]['routes'] = _dict.pop("routes")
- salt_master.nodes[key]['networks'] = _dict
+ self.salt_master.nodes[key]['routes'] = _dict.pop("routes")
+ self.salt_master.nodes[key]['networks'] = _dict
else:
- salt_master.nodes[key]['networks'] = {}
- salt_master.nodes[key]['routes'] = {}
+ self.salt_master.nodes[key]['networks'] = {}
+ self.salt_master.nodes[key]['routes'] = {}
logger_cli.debug("... {} has {} networks".format(
key,
- len(salt_master.nodes[key]['networks'].keys())
+ len(self.salt_master.nodes[key]['networks'].keys())
))
logger_cli.info("-> done collecting networks data")
logger_cli.info("-> mapping IPs")
# match interfaces by IP subnets
- for host, node_data in salt_master.nodes.items():
- if not salt_master.is_node_available(host):
+ for host, node_data in self.salt_master.nodes.items():
+ if not self.salt_master.is_node_available(host):
continue
for net_name, net_data in node_data['networks'].items():
@@ -460,7 +463,7 @@
for hostname in names:
_notes = []
node = hostname.split('.')[0]
- if not salt_master.is_node_available(hostname, log=False):
+ if not self.salt_master.is_node_available(hostname, log=False):
logger_cli.info(
" {0:8} {1}".format(node, "node not available")
)
@@ -513,7 +516,7 @@
# get gate and routes if proto is static
if _proto == 'static':
# get the gateway for current net
- _routes = salt_master.nodes[hostname]['routes']
+ _routes = self.salt_master.nodes[hostname]['routes']
_route = _routes[_net] if _net in _routes else None
# get the default gateway
if 'default' in _routes:
diff --git a/cfg_checker/modules/network/pinger.py b/cfg_checker/modules/network/pinger.py
index 5b12a94..17f8597 100644
--- a/cfg_checker/modules/network/pinger.py
+++ b/cfg_checker/modules/network/pinger.py
@@ -5,22 +5,24 @@
from cfg_checker.helpers.console_utils import Progress
from cfg_checker.modules.network.mapper import NetworkMapper
from cfg_checker.modules.network.network_errors import NetworkErrors
-from cfg_checker.nodes import salt_master
+from cfg_checker.nodes import SaltNodes
# This is independent class with a salt.nodes input
class NetworkPinger(object):
def __init__(
self,
+ config,
mtu=None,
detailed=False,
errors_class=None,
skip_list=None,
skip_list_file=None
):
- logger_cli.info("# Initializing")
+ logger_cli.info("# Initializing Pinger")
+ self.salt_master = SaltNodes(config)
# all active nodes in the cloud
- self.target_nodes = salt_master.get_nodes(
+ self.target_nodes = self.salt_master.get_nodes(
skip_list=skip_list,
skip_list_file=skip_list_file
)
@@ -117,7 +119,7 @@
# do ping of packets
logger_cli.info("# Pinging nodes: MTU={}".format(self.target_mtu))
- salt_master.prepare_script_on_active_nodes("ping.py")
+ self.salt_master.prepare_script_on_active_nodes("ping.py")
_progress = Progress(_count)
_progress_index = 0
_node_index = 0
@@ -125,13 +127,13 @@
_targets = src_data["targets"]
_node_index += 1
# create 'targets.json' on source host
- _path = salt_master.prepare_json_on_node(
+ _path = self.salt_master.prepare_json_on_node(
src,
_targets,
"targets.json"
)
# execute ping.py
- _results = salt_master.execute_script_on_node(
+ _results = self.salt_master.execute_script_on_node(
src,
"ping.py",
args=[_path]
diff --git a/cfg_checker/modules/packages/__init__.py b/cfg_checker/modules/packages/__init__.py
index 2d0cc79..e482eec 100644
--- a/cfg_checker/modules/packages/__init__.py
+++ b/cfg_checker/modules/packages/__init__.py
@@ -1,9 +1,12 @@
+from cfg_checker.common.settings import ENV_TYPE_SALT, \
+ ENV_TYPE_KUBE, ENV_TYPE_LINUX
from cfg_checker.helpers import args_utils
from cfg_checker.modules.packages.repos import RepoManager
from . import checker
command_help = "Package versions check (Candidate vs Installed)"
+supported_envs = [ENV_TYPE_SALT, ENV_TYPE_KUBE, ENV_TYPE_LINUX]
def init_parser(_parser):
@@ -98,12 +101,19 @@
return _parser
-def do_report(args):
+def do_report(args, config):
"""Create package versions report, HTML
:args: - parser arguments
:return: - no return value
"""
+ # Check if there is supported env found
+ _env = args_utils.check_supported_env(
+ [ENV_TYPE_SALT, ENV_TYPE_KUBE],
+ args,
+ config
+ )
+ # Start command
_type, _filename = args_utils.get_package_report_type_and_filename(args)
if ' ' in args.exclude_keywords:
@@ -113,12 +123,23 @@
# init connection to salt and collect minion data
_skip, _skip_file = args_utils.get_skip_args(args)
- pChecker = checker.CloudPackageChecker(
- force_tag=args.force_tag,
- exclude_keywords=_kw,
- skip_list=_skip,
- skip_list_file=_skip_file
- )
+ if _env == ENV_TYPE_SALT:
+ pChecker = checker.SaltCloudPackageChecker(
+ config,
+ force_tag=args.force_tag,
+ exclude_keywords=_kw,
+ skip_list=_skip,
+ skip_list_file=_skip_file
+ )
+ elif _env == ENV_TYPE_KUBE:
+ pChecker = checker.KubeCloudPackageChecker(
+ config,
+ force_tag=args.force_tag,
+ exclude_keywords=_kw,
+ skip_list=_skip,
+ skip_list_file=_skip_file
+ )
+
# collect data on installed packages
pChecker.collect_installed_packages()
# diff installed and candidates
@@ -127,12 +148,14 @@
pChecker.create_report(_filename, rtype=_type, full=args.full)
-def do_versions(args):
+def do_versions(args, config):
"""Builds tagged repo structure and parses Packages.gz files
:args: - parser arguments
:return: - no return value
"""
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
# Get the list of tags for the url
r = RepoManager()
if args.list_tags:
@@ -159,9 +182,11 @@
r.parse_repos()
-def do_show(args):
+def do_show(args, config):
"""Shows package (or multiple) history across parsed tags
"""
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
# Init manager
r = RepoManager()
# show packages
@@ -169,9 +194,11 @@
r.show_package(p)
-def do_show_app(args):
+def do_show_app(args, config):
"""Shows packages for app
"""
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
# Init manager
r = RepoManager()
# show packages
diff --git a/cfg_checker/modules/packages/checker.py b/cfg_checker/modules/packages/checker.py
index fb02db2..b2e8194 100644
--- a/cfg_checker/modules/packages/checker.py
+++ b/cfg_checker/modules/packages/checker.py
@@ -3,9 +3,10 @@
from cfg_checker.common import const, logger_cli
from cfg_checker.common.exception import ConfigException
from cfg_checker.common.other import merge_dict
+from cfg_checker.common.settings import ENV_TYPE_SALT
from cfg_checker.helpers.console_utils import Progress
from cfg_checker.modules.packages.repos import RepoManager
-from cfg_checker.nodes import salt_master
+from cfg_checker.nodes import SaltNodes, KubeNodes
from cfg_checker.reports import reporter
from .versions import DebianVersion, PkgVersions, VersionCmpResult
@@ -14,34 +15,36 @@
class CloudPackageChecker(object):
def __init__(
self,
+ config,
force_tag=None,
exclude_keywords=[],
skip_list=None,
skip_list_file=None
):
+ # check that this env tag is present in Manager
+ self.env_config = config
+ self.rm = RepoManager()
+ self.force_tag = force_tag
+ self.exclude_keywords = exclude_keywords
+
# Init salt master info
- if not salt_master.nodes:
- salt_master.nodes = salt_master.get_nodes(
+ if not self.master.nodes:
+ self.master.nodes = self.master.get_nodes(
skip_list=skip_list,
skip_list_file=skip_list_file
)
- # check that this env tag is present in Manager
- self.rm = RepoManager()
- _tags = self.rm.get_available_tags(tag=salt_master.mcp_release)
+ _tags = self.rm.get_available_tags(tag=self.master.mcp_release)
if not _tags:
logger_cli.warning(
- "\n# hWARNING: '{0}' is not listed in repo index. "
+ "\n# WARNING: '{0}' is not listed in repo index. "
"Consider running:\n\t{1}\nto add info on this tag's "
"release package versions".format(
- salt_master.mcp_release,
- "mcp-checker packages versions --tag {0}"
+ self.master.mcp_release,
+ "mcp-checker packages versions --tag <target_tag>"
)
)
- self.force_tag = force_tag
- self.exclude_keywords = exclude_keywords
-
@staticmethod
def presort_packages(all_packages, full=None):
logger_cli.info("-> Presorting packages")
@@ -189,41 +192,6 @@
return _data
- def collect_installed_packages(self):
- """
- Collect installed packages on each node
- sets 'installed' dict property in the class
-
- :return: none
- """
- logger_cli.info("# Collecting installed packages")
- salt_master.prepare_script_on_active_nodes("pkg_versions.py")
- _result = salt_master.execute_script_on_active_nodes("pkg_versions.py")
-
- for key in salt_master.nodes.keys():
- # due to much data to be passed from salt, it is happening in order
- if key in _result and _result[key]:
- _text = _result[key]
- try:
- _dict = json.loads(_text[_text.find('{'):])
- except ValueError:
- logger_cli.info("... no JSON for '{}'".format(
- key
- ))
- logger_cli.debug(
- "ERROR:\n{}\n".format(_text[:_text.find('{')])
- )
- _dict = {}
-
- salt_master.nodes[key]['packages'] = _dict
- else:
- salt_master.nodes[key]['packages'] = {}
- logger_cli.debug("... {} has {} packages installed".format(
- key,
- len(salt_master.nodes[key]['packages'].keys())
- ))
- logger_cli.info("-> Done")
-
def collect_packages(self):
"""
Check package versions in repos vs installed
@@ -231,13 +199,13 @@
:return: no return values, all date put to dict in place
"""
# Preload OpenStack release versions
- _desc = PkgVersions()
+ _desc = PkgVersions(self.env_config)
logger_cli.info(
"# Cross-comparing: Installed vs Candidates vs Release"
)
# shortcuts for this cloud values
- _os = salt_master.openstack_release
- _mcp = salt_master.mcp_release
+ _os = self.master.openstack_release
+ _mcp = self.master.mcp_release
_t = [self.force_tag] if self.force_tag else []
_t.append(_mcp)
@@ -250,12 +218,12 @@
)
# Progress class
- _progress = Progress(len(salt_master.nodes.keys()))
+ _progress = Progress(len(self.master.nodes.keys()))
_progress_index = 0
_total_processed = 0
# Collect packages from all of the nodes in flat dict
_all_packages = {}
- for node_name, node_value in salt_master.nodes.items():
+ for node_name, node_value in self.master.nodes.items():
_uniq_len = len(_all_packages.keys())
_progress_index += 1
# progress updates shown before next node only
@@ -277,8 +245,9 @@
# at a first sight
if _name not in _all_packages:
# get node attributes
- _linux = salt_master.nodes[node_name]['linux_codename']
- _arch = salt_master.nodes[node_name]['linux_arch']
+ _linux = \
+ self.master.nodes[node_name]['linux_codename']
+ _arch = self.master.nodes[node_name]['linux_arch']
# get versions for tag, Openstack release and repo headers
# excluding 'nightly' repos by default
_r = {}
@@ -415,9 +384,9 @@
"""
logger_cli.info("# Generating report to '{}'".format(filename))
if rtype == 'html':
- _type = reporter.HTMLPackageCandidates()
+ _type = reporter.HTMLPackageCandidates(self.master)
elif rtype == 'csv':
- _type = reporter.CSVAllPackages()
+ _type = reporter.CSVAllPackages(self.master)
else:
raise ConfigException("Report type not set")
_report = reporter.ReportToFile(
@@ -425,10 +394,86 @@
filename
)
payload = {
- "nodes": salt_master.nodes,
- "mcp_release": salt_master.mcp_release,
- "openstack_release": salt_master.openstack_release
+ "nodes": self.master.nodes,
+ "mcp_release": self.master.mcp_release,
+ "openstack_release": self.master.openstack_release
}
payload.update(self.presort_packages(self._packages, full))
_report(payload)
logger_cli.info("-> Done")
+
+ def collect_installed_packages(self):
+ """
+ Collect installed packages on each node
+ sets 'installed' dict property in the class
+
+ :return: none
+ """
+ logger_cli.info("# Collecting installed packages")
+ if self.master.env_type == ENV_TYPE_SALT:
+ self.master.prepare_script_on_active_nodes("pkg_versions.py")
+ _result = self.master.execute_script_on_active_nodes(
+ "pkg_versions.py"
+ )
+
+ for key in self.master.nodes.keys():
+ # due to much data to be passed from salt, it is happening in order
+ if key in _result and _result[key]:
+ _text = _result[key]
+ try:
+ _dict = json.loads(_text[_text.find('{'):])
+ except ValueError:
+ logger_cli.info("... no JSON for '{}'".format(
+ key
+ ))
+ logger_cli.debug(
+ "ERROR:\n{}\n".format(_text[:_text.find('{')])
+ )
+ _dict = {}
+
+ self.master.nodes[key]['packages'] = _dict
+ else:
+ self.master.nodes[key]['packages'] = {}
+ logger_cli.debug("... {} has {} packages installed".format(
+ key,
+ len(self.master.nodes[key]['packages'].keys())
+ ))
+ logger_cli.info("-> Done")
+
+
+class SaltCloudPackageChecker(CloudPackageChecker):
+ def __init__(
+ self,
+ config,
+ force_tag=None,
+ exclude_keywords=[],
+ skip_list=None,
+ skip_list_file=None
+ ):
+ self.master = SaltNodes(config)
+ super(SaltCloudPackageChecker, self).__init__(
+ config,
+            force_tag=force_tag,
+            exclude_keywords=exclude_keywords,
+            skip_list=skip_list,
+            skip_list_file=skip_list_file
+ )
+
+
+class KubeCloudPackageChecker(CloudPackageChecker):
+ def __init__(
+ self,
+ config,
+ force_tag=None,
+ exclude_keywords=[],
+ skip_list=None,
+ skip_list_file=None
+ ):
+ self.master = KubeNodes(config)
+ super(KubeCloudPackageChecker, self).__init__(
+ config,
+            force_tag=force_tag,
+            exclude_keywords=exclude_keywords,
+            skip_list=skip_list,
+            skip_list_file=skip_list_file
+ )
diff --git a/cfg_checker/modules/packages/repos.py b/cfg_checker/modules/packages/repos.py
index 57d8b9e..15129e7 100644
--- a/cfg_checker/modules/packages/repos.py
+++ b/cfg_checker/modules/packages/repos.py
@@ -376,6 +376,7 @@
init_done = False
def _init_folders(self, arch_folder=None):
+ logger_cli.info("# Loading package versions data")
# overide arch folder if needed
if arch_folder:
self._arch_folder = arch_folder
diff --git a/cfg_checker/modules/packages/versions.py b/cfg_checker/modules/packages/versions.py
index 542c0e4..05eaad8 100644
--- a/cfg_checker/modules/packages/versions.py
+++ b/cfg_checker/modules/packages/versions.py
@@ -2,7 +2,7 @@
import os
import re
-from cfg_checker.common import config, const, logger_cli
+from cfg_checker.common import const, logger_cli
from cfg_checker.common.settings import pkg_dir
@@ -17,7 +17,7 @@
"versions": {}
}
- def __init__(self):
+ def __init__(self, config):
# preload csv file
logger_cli.info("# Preloading specific MCP release versions")
with open(os.path.join(
diff --git a/cfg_checker/modules/reclass/__init__.py b/cfg_checker/modules/reclass/__init__.py
index 88b287e..8d498c3 100644
--- a/cfg_checker/modules/reclass/__init__.py
+++ b/cfg_checker/modules/reclass/__init__.py
@@ -1,6 +1,7 @@
import os
from cfg_checker.common import logger_cli
+from cfg_checker.common.settings import ENV_TYPE_LINUX
from cfg_checker.helpers import args_utils
from cfg_checker.reports import reporter
@@ -9,6 +10,7 @@
from . import validator
command_help = "Reclass related checks and reports"
+supported_envs = [ENV_TYPE_LINUX]
def init_parser(_parser):
@@ -48,7 +50,10 @@
return _parser
-def do_list(args):
+def do_list(args, config):
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
+ # Start command
logger_cli.info("# Reclass list")
_arg_path = args_utils.get_arg(args, 'models_path')
logger_cli.info("-> Current path is: {}".format(_arg_path))
@@ -78,7 +83,10 @@
return
-def do_diff(args):
+def do_diff(args, config):
+ # Check if there is supported env found
+ args_utils.check_supported_env(ENV_TYPE_LINUX, args, config)
+ # Start command
logger_cli.info("# Reclass comparer (HTML report)")
_filename = args_utils.get_arg(args, 'html')
# checking folder params