Network check fixes
- Proper network mapping
- Proper reclass lookup
- VIP detection (see the sketch below)
- Simple error gathering
- IPs shown 'exploded', i.e. in CIDR format
- MTU matching and mismatch detection
- Errors class with error codes and indices
- Summary and detailed error views
- Flake8 refactoring
Change-Id: I8ee37d345bdc21c7ad930bf8305acd28f8c121c8
Related-PROD: PROD-28199
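Reviewer note: a minimal, self-contained sketch of the stdlib `ipaddress` logic this change builds on -- mapping an interface IP into its subnet, printing it 'exploded' in CIDR format, and treating a /32 prefix as a VIP attached to its parent subnet. Names and sample addresses are illustrative only, not taken from the checker itself.

```python
import ipaddress

# runtime interfaces as "address/prefix" strings; the /32 entry plays the VIP
if_strings = ["10.0.10.11/24", "10.0.10.100/32"]

nets = {}  # IPv4Network -> list of IPv4Interface, like the checker's tree
for s in if_strings:
    iface = ipaddress.IPv4Interface(s)
    print(iface.exploded)  # the 'exploded' CIDR form, e.g. "10.0.10.11/24"
    if iface.network.prefixlen == 32:
        # host-scoped address, i.e. a VIP: attach to matching parent subnet(s)
        for net in [n for n in nets if iface.ip in n]:
            nets[net].append(iface)
        continue
    nets.setdefault(iface.network, []).append(iface)

for net, ifaces in nets.items():
    print(net, "->", [str(i.ip) for i in ifaces])
```

This only works if the parent /24 is seen before the /32, which is why the checker sorts each node's addresses so the wider mask comes first.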
diff --git a/cfg_checker/modules/network/__init__.py b/cfg_checker/modules/network/__init__.py
index 6217b1a..8f4a037 100644
--- a/cfg_checker/modules/network/__init__.py
+++ b/cfg_checker/modules/network/__init__.py
@@ -1,7 +1,8 @@
+from cfg_checker.common import logger_cli
+from cfg_checker.helpers import args_utils
+
import checker
-from cfg_checker.helpers import args_utils
-from cfg_checker.common import logger_cli
def _prepare_check():
_checker_class = checker.NetworkChecker()
@@ -9,19 +10,23 @@
_checker_class.collect_network_info()
return _checker_class
+
def do_check(args):
logger_cli.info("# Network check to console")
netChecker = _prepare_check()
netChecker.print_network_report()
- return
+ netChecker.print_summary()
+
+ if args.detailed:
+ netChecker.print_error_details()
def do_report(args):
logger_cli.info("# Network report")
_filename = args_utils.get_arg(args, 'html')
-
+
netChecker = _prepare_check()
netChecker.create_html_report(_filename)
diff --git a/cfg_checker/modules/network/checker.py b/cfg_checker/modules/network/checker.py
index b0056c8..25060a6 100644
--- a/cfg_checker/modules/network/checker.py
+++ b/cfg_checker/modules/network/checker.py
@@ -1,28 +1,63 @@
-import json
-import os
-import sys
import ipaddress
+import json
-from copy import deepcopy
+from cfg_checker.common import logger_cli
+from cfg_checker.modules.network.network_errors import NetworkErrors
+from cfg_checker.nodes import SaltNodes
from cfg_checker.reports import reporter
-from cfg_checker.common import utils, const
-from cfg_checker.common import config, logger, logger_cli, pkg_dir
-from cfg_checker.common import salt_utils
-from cfg_checker.nodes import SaltNodes, node_tmpl
class NetworkChecker(SaltNodes):
- @staticmethod
- def _map_network_for_host(host, if_class, net_list, data):
- if if_class.network in net_list.keys():
- # There is a network
- net_list[if_class.network][host] = data
+ def __init__(self):
+ super(NetworkChecker, self).__init__()
+ self.errors = NetworkErrors()
+
+ # adding net data to tree
+ def _add_data(self, _list, _n, _h, _d):
+ if _n not in _list:
+ _list[_n] = {}
+ _list[_n][_h] = [_d]
+ elif _h not in _list[_n]:
+ # there is no such host, just create it
+ _list[_n][_h] = [_d]
else:
- # create subnet key
- net_list[if_class.network] = {}
- # add the host to the dict
- net_list[if_class.network][host] = data
+ # there is such host... this is an error
+ self.errors.add_error(
+ self.errors.NET_DUPLICATE_IF,
+ host=_h,
+ dup_if=_d['name']
+ )
+ _list[_n][_h].append(_d)
+
+ # TODO: refactor map creation. Build one map instead of two separate
+ def _map_network_for_host(self, host, if_class, net_list, data):
+ # filter networks for this IF IP
+ _nets = [n for n in net_list.keys() if if_class.ip in n]
+ _masks = [n.netmask for n in _nets]
+ if len(_nets) > 1:
+            # Multiple networks found for this IP; error
+ self.errors.add_error(
+ self.errors.NET_SUBNET_INTERSECT,
+ host=host,
+                networks="; ".join([str(_n) for _n in _nets])
+ )
+ # check mask match
+ if len(_nets) > 0 and if_class.netmask not in _masks:
+ self.errors.add_error(
+ self.errors.NET_MASK_MISMATCH,
+ host=host,
+ if_name=data['name'],
+ if_cidr=if_class.exploded,
+ if_mapped_networks=", ".join([str(_n) for _n in _nets])
+ )
+
+ if len(_nets) < 1:
+ self._add_data(net_list, if_class.network, host, data)
+ else:
+ # add all data
+ for net in _nets:
+ self._add_data(net_list, net, host, data)
return net_list
@@ -33,8 +68,11 @@
:return: none
"""
logger_cli.info("# Mapping node runtime network data")
- _result = self.execute_script_on_active_nodes("ifs_data.py", args=["json"])
-
+ _result = self.execute_script_on_active_nodes(
+ "ifs_data.py",
+ args=["json"]
+ )
+ self.stage = "Runtime"
for key in self.nodes.keys():
# check if we are to work with this node
if not self.is_node_available(key):
@@ -54,6 +92,7 @@
))
logger_cli.info("-> done collecting networks data")
+ # TODO: Mimic reclass structure for easy compare
logger_cli.info("### Building network tree")
# match interfaces by IP subnets
_all_nets = {}
@@ -66,26 +105,35 @@
if net_name in ['lo']:
# skip the localhost
continue
- _ip4s = net_data['ipv4']
- for _ip_str in _ip4s.keys():
- # create interface class
+            # get data and make sure that the widest mask goes first
+ _ip4s = sorted(
+ net_data['ipv4'],
+ key=lambda s: s[s.index('/'):]
+ )
+ for _ip_str in _ip4s:
+ # create interface class
_if = ipaddress.IPv4Interface(_ip_str)
- net_data['name'] = net_name
- net_data['if'] = _if
-
- _all_nets = self._map_network_for_host(
- host,
- _if,
- _all_nets,
- net_data
- )
+ if 'name' not in net_data:
+ net_data['name'] = net_name
+ if 'ifs' not in net_data:
+ net_data['ifs'] = [_if]
+ # map it
+ _all_nets = self._map_network_for_host(
+ host,
+ _if,
+ _all_nets,
+ net_data
+ )
+ else:
+ # data is already there, just add VIP
+ net_data['ifs'].append(_if)
# save collected info
self.all_nets = _all_nets
-
def collect_reclass_networks(self):
logger_cli.info("# Mapping reclass networks")
+ self.stage = "Reclass"
# Get networks from reclass and mark them
_reclass_nets = {}
# Get required pillars
@@ -101,9 +149,11 @@
if 'interface' in _pillar:
_pillar = _pillar['interface']
else:
- logger_cli.info("...skipping node '{}', no IF section in reclass".format(
- node
- ))
+ logger_cli.info(
+ "... node '{}' skipped, no IF section in reclass".format(
+ node
+ )
+ )
continue
for _if_name, _if_data in _pillar.iteritems():
if 'address' in _if_data:
@@ -111,7 +161,7 @@
_if_data['address'] + '/' + _if_data['netmask']
)
_if_data['name'] = _if_name
- _if_data['if'] = _if
+ _if_data['ifs'] = [_if]
_reclass_nets = self._map_network_for_host(
node,
@@ -122,7 +172,6 @@
self.reclass_nets = _reclass_nets
-
def print_network_report(self):
"""
Create text report for CLI
@@ -132,7 +181,8 @@
_all_nets = self.all_nets.keys()
logger_cli.info("# Reclass networks")
logger_cli.info(
- " {0:17} {1:25}: {2:19} {3:5}{4:10} {5}{6} {7} / {8} / {9}".format(
+ " {0:17} {1:25}: "
+ "{2:19} {3:5}{4:10} {5}{6} {7} / {8} / {9}".format(
"Hostname",
"IF",
"IP",
@@ -145,7 +195,8 @@
"rcGate"
)
)
-
+ # TODO: Move matching to separate function
+ self.stage = "Matching"
_reclass = [n for n in _all_nets if n in self.reclass_nets]
for network in _reclass:
# shortcuts
@@ -154,7 +205,7 @@
names = sorted(self.all_nets[network].keys())
for hostname in names:
if not self.is_node_available(hostname, log=False):
- logger_cli.info(
+ logger_cli.info(
" {0:17} {1}".format(
hostname.split('.')[0],
"... no data for the node"
@@ -167,8 +218,8 @@
if not _route:
_gate = "no route!"
else:
- _gate = _route['gateway'] if _route['gateway'] else "empty"
-
+ _gate = _route['gateway'] if _route['gateway'] else "-"
+
# get the default gateway
if 'default' in _routes:
_d_gate = ipaddress.IPv4Address(
@@ -179,45 +230,107 @@
_d_gate_str = _d_gate if _d_gate else "No default gateway!"
_a = self.all_nets[network][hostname]
- # Check if reclass has such network
- if hostname in self.reclass_nets[network]:
- _r = self.reclass_nets[network][hostname]
- else:
- # Supply empty dict if there is no reclass gathered
- _r = {}
-
- # Take gateway parameter for this IF
- # from corresponding reclass record
- # TODO: Update gateway search mechanism
- if not self.is_node_available(hostname):
- _r_gate = "-"
- elif _a['if'].network not in self.reclass_nets:
- _r_gate = "no IF in reclass!"
- elif not hostname in self.reclass_nets[_a['if'].network]:
- _r_gate = "no IF on node in reclass!"
- else:
- _rd = self.reclass_nets[_a['if'].network][hostname]
- _r_gate = _rd['gateway'] if 'gateway' in _rd else "empty"
+ for _host in _a:
+ for _if in _host['ifs']:
+ # get proper reclass
+ _ip_str = str(_if.exploded)
+ _r = {}
+                    for _item in self.reclass_nets[network].get(hostname, []):
+ for _item_ifs in _item['ifs']:
+ if _ip_str == str(_item_ifs.exploded):
+ _r = _item
- if not 'enabled' in _r:
- _enabled = "no record!"
- else:
- _enabled = "(enabled)" if _r['enabled'] else "(disabled)"
- _text = "{0:25}: {1:19} {2:5}{3:10} {4:4}{5:10} {6} / {7} / {8}".format(
- _a['name'],
- str(_a['if'].ip),
- _a['mtu'],
- '('+str(_r['mtu'])+')' if 'mtu' in _r else '(unset!)',
- _a['state'],
- _enabled,
- _gate,
- _d_gate_str,
- _r_gate
- )
- logger_cli.info(
- " {0:17} {1}".format(hostname.split('.')[0], _text)
- )
-
+ # check if node is UP
+ if not self.is_node_available(hostname):
+ _r_gate = "-"
+ # get proper network from reclass
+ else:
+ # Lookup match for the ip
+ _r_gate = "no IF in reclass!"
+ # get all networks with this hostname
+ _rn = self.reclass_nets
+ _nets = filter(
+ lambda n: hostname in _rn[n].keys(),
+ self.reclass_nets
+ )
+ _rd = None
+ for _item in _nets:
+ # match ip
+ _r_dat = self.reclass_nets[_item][hostname]
+ for _r_ifs in _r_dat:
+ for _r_if in _r_ifs['ifs']:
+ if _if.ip == _r_if.ip:
+ _rd = _r_ifs
+ break
+ if _rd:
+ _gs = 'gateway'
+ _e = "empty"
+ _r_gate = _rd[_gs] if _gs in _rd else _e
+ break
+
+ # IF status in reclass
+ if 'enabled' not in _r:
+ _enabled = "no record!"
+ else:
+ _e = "enabled"
+ _d = "disabled"
+ _enabled = "("+_e+")" if _r[_e] else "("+_d+")"
+
+ _name = _host['name']
+ _rc_mtu = _r['mtu'] if 'mtu' in _r else None
+
+ # Check if this is a VIP
+ if _if.network.prefixlen == 32:
+ _name = " "*20
+ _ip_str += " VIP"
+ _rc_mtu = "(-)"
+ _enabled = "(-)"
+ _r_gate = "-"
+
+ # Check if this is a default MTU
+ elif _host['mtu'] == '1500':
+                        # reclass MTU is left empty when intended to be 1500
+ _rc_mtu = "(-)"
+ elif _rc_mtu:
+ # if there is an MTU value, match it
+ if _host['mtu'] != str(_rc_mtu):
+ self.errors.add_error(
+ self.errors.NET_MTU_MISMATCH,
+ host=hostname,
+ if_name=_name,
+ if_cidr=_ip_str,
+ reclass_mtu=_rc_mtu,
+ runtime_mtu=_host['mtu']
+ )
+ else:
+ # there is no MTU value in reclass
+ self.errors.add_error(
+ self.errors.NET_MTU_EMPTY,
+ host=hostname,
+ if_name=_name,
+ if_cidr=_ip_str,
+ if_mtu=_host['mtu']
+ )
+
+                    _text = ("{0:25}: {1:19} {2:5}{3:10} {4:4}{5:10} {6} "
+                             "/ {7} / {8}").format(
+ _name,
+ _ip_str,
+ _host['mtu'],
+ str(_rc_mtu) if _rc_mtu else "(No!)",
+ _host['state'],
+ _enabled,
+ _gate,
+ _d_gate_str,
+ _r_gate
+ )
+ logger_cli.info(
+ " {0:17} {1}".format(
+ hostname.split('.')[0],
+ _text
+ )
+ )
+
logger_cli.info("\n# Other networks")
_other = [n for n in _all_nets if n not in self.reclass_nets]
for network in _other:
@@ -225,17 +338,41 @@
names = sorted(self.all_nets[network].keys())
for hostname in names:
- _text = "{0:25}: {1:19} {2:5} {3:4}".format(
- self.all_nets[network][hostname]['name'],
- str(self.all_nets[network][hostname]['if'].ip),
- self.all_nets[network][hostname]['mtu'],
- self.all_nets[network][hostname]['state']
- )
- logger_cli.info(
- " {0:17} {1}".format(hostname.split('.')[0], _text)
- )
+ for _n in self.all_nets[network][hostname]:
+ _ifs = [str(ifs.ip) for ifs in _n['ifs']]
+ _text = "{0:25}: {1:19} {2:5} {3:4}".format(
+ _n['name'],
+ ", ".join(_ifs),
+ _n['mtu'],
+ _n['state']
+ )
+ logger_cli.info(
+ " {0:17} {1}".format(hostname.split('.')[0], _text)
+ )
-
+ def print_summary(self):
+ _total_errors = self.errors.get_errors_total()
+ # Summary
+ logger_cli.info(
+ "\n{:=^8s}\n{:^8s}\n{:=^8s}".format(
+ "=",
+ "Totals",
+ "="
+ )
+ )
+ logger_cli.info(self.errors.get_summary(print_zeros=False))
+ logger_cli.info('-'*20)
+ logger_cli.info("{:5d} total errors found\n".format(_total_errors))
+
+ def print_error_details(self):
+ # Detailed errors
+ if self.errors.get_errors_total() > 0:
+ logger_cli.info("\n# Errors")
+ for _msg in self.errors.get_errors_as_list():
+ logger_cli.info("{}\n".format(_msg))
+ else:
+ logger_cli.info("-> No errors\n")
+
def create_html_report(self, filename):
"""
Create static html showing network schema-like report
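Reviewer note on the sorting used above: the patch orders each node's IPv4 strings lexically by their '/len' suffix so the parent subnet is registered before its /32 VIPs, which holds for the common /16-/32 range. A sketch of the same idea with a numeric prefix-length key (a slight variation that also orders single-digit prefixes correctly; names and addresses are hypothetical):

```python
import ipaddress

addrs = ["192.168.1.50/32", "192.168.1.10/24", "10.0.0.1/8"]

# numeric key: wider networks (smaller prefix length) sort first
addrs = sorted(addrs, key=lambda s: int(s[s.index('/') + 1:]))
print(addrs)  # ['10.0.0.1/8', '192.168.1.10/24', '192.168.1.50/32']

# with the parent subnet registered first, the /32 VIP finds its bucket
tree = {}
for s in addrs:
    iface = ipaddress.IPv4Interface(s)
    if iface.network.prefixlen < 32:
        tree[iface.network] = [iface]
    else:
        for net in (n for n in tree if iface.ip in n):
            tree[net].append(iface)
```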
diff --git a/cfg_checker/modules/network/network_errors.py b/cfg_checker/modules/network/network_errors.py
new file mode 100644
index 0000000..2ec3d31
--- /dev/null
+++ b/cfg_checker/modules/network/network_errors.py
@@ -0,0 +1,42 @@
+import itertools
+
+from cfg_checker.helpers.errors import ErrorIndex
+
+
+_c = itertools.count(1)
+
+
+class NetworkErrors(ErrorIndex):
+ # error type codes here
+ NET_MTU_MISMATCH = next(_c)
+ NET_MTU_EMPTY = next(_c)
+ NET_DUPLICATE_IF = next(_c)
+ NET_SUBNET_INTERSECT = next(_c)
+ NET_MASK_MISMATCH = next(_c)
+
+ def __init__(self):
+ super(NetworkErrors, self).__init__("NET")
+
+ self.add_error_type(
+ self.NET_MTU_MISMATCH,
+ "MTU mismatch on runtime interface and in reclass"
+ )
+ self.add_error_type(
+ self.NET_MTU_EMPTY,
+ "MTU value is not 1500 on runtime and empty in reclass"
+ )
+ self.add_error_type(
+ self.NET_DUPLICATE_IF,
+ "Duplicate interface specified"
+ )
+ self.add_error_type(
+ self.NET_SUBNET_INTERSECT,
+ "Subnets intersection detected"
+ )
+ self.add_error_type(
+ self.NET_MASK_MISMATCH,
+ "IFs mask settings for the same subnet is not the same"
+ )
+
+
+del _c
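Reviewer note: `cfg_checker/helpers/errors.py` is not part of this diff, so here is a minimal stand-in for the `ErrorIndex` interface that `NetworkErrors` relies on (`add_error_type`, `add_error`, `get_errors_total`, `get_summary`, `get_errors_as_list`). This is an assumption-driven sketch -- the real helper may store and format errors differently.

```python
class ErrorIndex(object):
    """Hypothetical minimal ErrorIndex; not the actual helper from this repo."""
    def __init__(self, area_code):
        self._area = area_code   # e.g. "NET"
        self._types = {}         # error code -> message
        self._errors = []        # (code, kwargs) in order of discovery

    def add_error_type(self, code, message):
        self._types[code] = message

    def add_error(self, code, **kwargs):
        self._errors.append((code, kwargs))

    def get_errors_total(self):
        return len(self._errors)

    def get_summary(self, print_zeros=True):
        lines = []
        for code, message in sorted(self._types.items()):
            count = sum(1 for c, _ in self._errors if c == code)
            if count or print_zeros:
                lines.append("{:5d} {}-{:03d}: {}".format(
                    count, self._area, code, message))
        return "\n".join(lines)

    def get_errors_as_list(self):
        # one human-readable block per recorded error, kwargs included
        return [
            "{}-{:03d}: {}\n  {}".format(
                self._area, code, self._types[code],
                ", ".join("{}={}".format(k, v) for k, v in sorted(kw.items())))
            for code, kw in self._errors
        ]
```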
diff --git a/cfg_checker/modules/packages/__init__.py b/cfg_checker/modules/packages/__init__.py
index 774e674..5e717d6 100644
--- a/cfg_checker/modules/packages/__init__.py
+++ b/cfg_checker/modules/packages/__init__.py
@@ -1,7 +1,7 @@
-import checker
-
from cfg_checker.helpers import args_utils
+import checker
+
def do_report(args):
"""Create package versions report, HTML
diff --git a/cfg_checker/modules/packages/checker.py b/cfg_checker/modules/packages/checker.py
index 8a3456d..3225f70 100644
--- a/cfg_checker/modules/packages/checker.py
+++ b/cfg_checker/modules/packages/checker.py
@@ -1,18 +1,12 @@
import json
-import os
-#import sys
-from copy import deepcopy
-
+from cfg_checker.common import const, logger_cli
from cfg_checker.common.exception import ConfigException
-from cfg_checker.common import utils, const
-from cfg_checker.common import config, logger, logger_cli, pkg_dir
-from cfg_checker.common import salt_utils
from cfg_checker.helpers.console_utils import Progress
-from cfg_checker.nodes import SaltNodes, node_tmpl
+from cfg_checker.nodes import SaltNodes
from cfg_checker.reports import reporter
-from versions import PkgVersions, DebianVersion, VersionCmpResult
+from versions import DebianVersion, PkgVersions, VersionCmpResult
class CloudPackageChecker(SaltNodes):
@@ -99,7 +93,6 @@
_eo += _val['results'].keys().count(const.VERSION_ERR)
_do += _val['results'].keys().count(const.VERSION_DOWN)
-
_progress.newline()
_data['errors'] = {
@@ -133,13 +126,15 @@
_text = _result[key]
try:
_dict = json.loads(_text[_text.find('{'):])
- except ValueError as e:
+ except ValueError:
logger_cli.info("... no JSON for '{}'".format(
key
))
- logger_cli.debug("ERROR:\n{}\n".format(_text[:_text.find('{')]))
+ logger_cli.debug(
+ "ERROR:\n{}\n".format(_text[:_text.find('{')])
+ )
_dict = {}
-
+
self.nodes[key]['packages'] = _dict
else:
self.nodes[key]['packages'] = {}
@@ -157,8 +152,10 @@
"""
# Preload OpenStack release versions
_desc = PkgVersions()
-
- logger_cli.info("# Cross-comparing: Installed vs Candidates vs Release")
+
+ logger_cli.info(
+ "# Cross-comparing: Installed vs Candidates vs Release"
+ )
_progress = Progress(len(self.nodes.keys()))
_progress_index = 0
_total_processed = 0
@@ -196,10 +193,10 @@
# no description - no library :)
_vers = {}
_pkg_desc = _desc.dummy_desc
-
+
# get specific set for this OS release if present
if _os in _vers:
- _v = _vers[_os]
+ _v = _vers[_os]
elif 'any' in _vers:
_v = _vers['any']
else:
@@ -212,13 +209,13 @@
"results": {},
"r": _release,
}
-
+
_cmp = VersionCmpResult(
_ver_ins,
_ver_can,
_all_packages[_name]['r']
)
-
+
# shortcut to results
_res = _all_packages[_name]['results']
# update status
@@ -240,7 +237,6 @@
self._packages = _all_packages
_progress.newline()
-
def create_report(self, filename, rtype, full=None):
"""
diff --git a/cfg_checker/modules/packages/versions.py b/cfg_checker/modules/packages/versions.py
index 10f65dc..9352dd6 100644
--- a/cfg_checker/modules/packages/versions.py
+++ b/cfg_checker/modules/packages/versions.py
@@ -1,7 +1,7 @@
import csv
import os
-from cfg_checker.common import config, logger, logger_cli, pkg_dir, const
+from cfg_checker.common import config, const, logger_cli, pkg_dir
class PkgVersions(object):
@@ -33,15 +33,16 @@
_app = row[2]
_repo = row[3]
# if release cell empty - use keyword 'any'
- _os_release = row[4] if len(row[4]) > 0 else 'any'
+ _os_release = row[4] if len(row[4]) > 0 else 'any'
# prepare versions dict
_l = self._labels
- _versions = {_l[i]:row[5+i] for i in range(0, len(row[5:]))}
-
+ _versions = {_l[i]: row[5+i] for i in range(0, len(row[5:]))}
+
if _pkg in self._list:
if _os_release in self._list[_pkg]["versions"]:
- # all pkg/os_releases should be uniq. If found, latest one used
+                    # all pkg/os_releases should be unique;
+                    # if found again, the latest one is used
logger_cli.info(
"-> WARNING: Duplicate package info found "
"'{}' (line {})".format(
@@ -59,17 +60,17 @@
"versions": {}
}
})
-
+
# and finally, update the versions for this release
self._list[_pkg]["versions"].update({
_os_release: _versions
})
-
+
def __getitem__(self, pkg_name):
- if pkg_name in self._list:
+ if pkg_name in self._list:
return self._list[pkg_name]
else:
- #return self._dummy_desc
+ # return self._dummy_desc
return None
@@ -95,7 +96,7 @@
_ord_map = [ord(ch) not in _chars for ch in version_fragment]
# if there is nothing to extract, return at once
if not any([_s in version_fragment for _s in _symbols]) \
- and not any(_ord_map):
+ and not any(_ord_map):
# no revisions
return version_fragment, ""
else:
@@ -114,7 +115,7 @@
_main = version_fragment[:_indices[0]]
_rev = version_fragment[_indices[0]:]
return _main, _rev
-
+
def __init__(self, version_string):
# save
if len(version_string) < 1:
@@ -139,7 +140,7 @@
self.upstream, self.upstream_rev = self.split_revision(_m)
self.debian, self.debian_rev = self.split_revision(_d)
self.version = version_string
-
+
# Following functions is a freestyle python mimic of apt's upstream, enjoy
# https://github.com/chaos/apt/blob/master/apt/apt-pkg/deb/debversion.cc#L42
# mimic produced in order not to pull any packages or call external code
@@ -160,7 +161,7 @@
return _num
_li += 1
_ri += 1
-
+
# diff found? lens equal?
if not _diff and _lL != _rL:
# lens not equal? Longer - later
@@ -168,7 +169,7 @@
else:
# equal
return 0
-
+
def _cmp_num(self, lf, rf):
# split fragments into lists
_lhf = lf.split('.') if '.' in lf else list(lf)
@@ -178,14 +179,14 @@
_rhf = [int(n) for n in _rhf if len(n)]
return self._cmp_fragment(_lhf, _rhf)
-
+
def _cmp_lex(self, lf, rf):
# cast each item into its ORD value
_lhf = [ord(n) for n in lf]
_rhf = [ord(n) for n in rf]
- return self._cmp_fragment(_lhf, _rhf)
- # end of cmps
+ return self._cmp_fragment(_lhf, _rhf)
+ # end of cmps
# main part compared using splitted numbers
# if equal, revision is compared using lexical comparizon
@@ -217,7 +218,7 @@
return True
else:
return False
-
+
def update_parts(self, target, status):
# updating parts of version statuses
if self._cmp_num(self.epoch, target.epoch) != 0:
@@ -226,13 +227,13 @@
self.epoch_status = const.VERSION_OK
if self._cmp_num(self.upstream, target.upstream) != 0 \
- or self._cmp_lex(self.upstream_rev, target.upstream_rev) != 0:
+ or self._cmp_lex(self.upstream_rev, target.upstream_rev) != 0:
self.upstream_status = status
else:
self.upstream_status = const.VERSION_OK
if self._cmp_lex(self.debian, target.debian) != 0 \
- or self._cmp_lex(self.debian_rev, target.debian_rev) != 0:
+ or self._cmp_lex(self.debian_rev, target.debian_rev) != 0:
self.debian_status = status
else:
self.debian_status = const.VERSION_OK
@@ -245,13 +246,12 @@
source = None
target = None
-
def __init__(self, i, c, r):
# compare three versions and write a result
self.source = i
self.status = const.VERSION_NA
self.action = const.ACT_NA
-
+
# Check if there is a release version present
if r and len(r.version) > 0 and r.version != 'n/a':
# I < C, installed version is older
@@ -333,7 +333,7 @@
elif i == c:
self.status = const.VERSION_OK
self.action = const.ACT_NA
-
+
# and we need to update per-part status
self.source.update_parts(self.target, self.status)
@@ -342,4 +342,4 @@
if _t.debian and _t.debian > _s.debian:
return True
else:
- return false
+ return False
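Reviewer note, for context on the version logic touched above: a toy illustration of the fragment-wise compare that `DebianVersion` mimics from apt -- numeric fragments are compared element by element, the first difference wins, and on a tie the longer fragment list sorts later. These are hypothetical standalone helpers, not the module's actual methods.

```python
def cmp_fragment(lhs, rhs):
    # element-wise compare; first difference wins, else longer list is later
    for l, r in zip(lhs, rhs):
        if l != r:
            return -1 if l < r else 1
    return (len(lhs) > len(rhs)) - (len(lhs) < len(rhs))

def cmp_num(lf, rf):
    # split dotted numeric fragments into ints, e.g. "2.25.1" -> [2, 25, 1]
    to_ints = lambda f: [int(n) for n in f.split('.') if n]
    return cmp_fragment(to_ints(lf), to_ints(rf))

assert cmp_num("2.25.1", "2.25.1") == 0
assert cmp_num("2.25.1", "2.9.4") == 1    # 25 > 9 numerically, not lexically
assert cmp_num("2.25", "2.25.1") == -1    # shorter fragment list is earlier
```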
diff --git a/cfg_checker/modules/reclass/__init__.py b/cfg_checker/modules/reclass/__init__.py
index 2546ec3..adae6df 100644
--- a/cfg_checker/modules/reclass/__init__.py
+++ b/cfg_checker/modules/reclass/__init__.py
@@ -1,21 +1,22 @@
import os
-import comparer
-import validator
-
from cfg_checker.common import logger_cli
from cfg_checker.helpers import args_utils
from cfg_checker.reports import reporter
+import comparer
+
+import validator
+
def do_list(args):
logger_cli.info("# Reclass list")
_arg_path = args_utils.get_arg(args, 'models_path')
logger_cli.info("-> Current path is: {}".format(_arg_path))
_path = args_utils.get_path_arg(_arg_path)
-
- logger_cli.info("# ...models path is '{}'".format(args.models_path))
-
+
+ logger_cli.info("# ...models path is '{}'".format(_path))
+
models = {}
for _folder in os.listdir(args.models_path):
# validate item as a model
@@ -24,15 +25,15 @@
_folder
)
_validated = validator.basic_model_validation_by_path(_model_path)
-
+
if not _validated:
logger_cli.info("-> '{}' not a valid model".format(_folder))
continue
else:
models[_folder] = _model_path
-
+
logger_cli.info("-> '{}' at '{}'".format(_folder, _model_path))
-
+
# TODO: collect info about the model
return
@@ -44,7 +45,7 @@
# checking folder params
_model1 = args_utils.get_path_arg(args.model1)
_model2 = args_utils.get_path_arg(args.model2)
-
+
# Do actual compare using hardcoded model names
mComparer = comparer.ModelComparer()
@@ -52,7 +53,7 @@
mComparer.model_path_1 = _model1
mComparer.model_name_2 = os.path.split(_model2)[1]
mComparer.model_path_2 = _model2
-
+
mComparer.load_model_tree(
mComparer.model_name_1,
mComparer.model_path_1
diff --git a/cfg_checker/modules/reclass/comparer.py b/cfg_checker/modules/reclass/comparer.py
index b0b7b37..6591d16 100644
--- a/cfg_checker/modules/reclass/comparer.py
+++ b/cfg_checker/modules/reclass/comparer.py
@@ -4,13 +4,14 @@
"""
import itertools
import os
+
+from cfg_checker.common import logger, logger_cli
+from cfg_checker.reports import reporter
+
import yaml
-from cfg_checker.reports import reporter
-from cfg_checker.common import logger, logger_cli
-
-def get_element(element_path, input_data):
+def get_element(element_path, input_data):
paths = element_path.split(":")
data = input_data
for i in range(0, len(paths)):
@@ -18,7 +19,7 @@
return data
-def pop_element(element_path, input_data):
+def pop_element(element_path, input_data):
paths = element_path.split(":")
data = input_data
# Search for last dict
@@ -38,7 +39,7 @@
"03_cluster": "classes:cluster",
"04_other": "classes"
}
-
+
models = {}
models_path = "/srv/salt/reclass"
model_name_1 = "source"
@@ -123,7 +124,7 @@
# creating dict structure out of folder list. Pure python magic
parent = reduce(dict.get, folders[:-1], raw_tree)
parent[folders[-1]] = subdir
-
+
self.models[name] = {}
# Brake in according to pathes
_parts = self._model_parts.keys()
@@ -133,7 +134,7 @@
self._model_parts[_parts[ii]],
raw_tree[root_key]
)
-
+
# save it as a single data object
self.models[name]["rc_diffs"] = raw_tree[root_key]
return True
@@ -223,8 +224,7 @@
if _removed or _added:
_removed_str_lst = ["- {}".format(item)
for item in _removed]
- _added_str_lst = ["+ {}".format(item)
- for item in _added]
+ _added_str_lst = ["+ {}".format(i) for i in _added]
_report[_new_path] = {
"type": "list",
"raw_values": [
@@ -287,7 +287,6 @@
))
return _report
-
def generate_model_report_tree(self):
"""Use two loaded models to generate comparison table with
values are groupped by YAML files
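Reviewer note on the `reduce(dict.get, ...)` line visible in the comparer hunk ("pure python magic"): it walks a nested dict along a path of keys to reach the parent node, so each folder list can be hung onto the tree in one assignment. A self-contained sketch with illustrative data (in Python 2, `reduce` is a builtin; the import keeps this runnable on Python 3 too):

```python
from functools import reduce

raw_tree = {}
# paths must arrive parent-before-child, as os.walk yields them
for folders in (["cluster"], ["cluster", "infra"], ["cluster", "infra", "config"]):
    parent = reduce(dict.get, folders[:-1], raw_tree)  # walk to the parent dict
    parent[folders[-1]] = {}

print(raw_tree)  # {'cluster': {'infra': {'config': {}}}}
```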
diff --git a/cfg_checker/modules/reclass/validator.py b/cfg_checker/modules/reclass/validator.py
index e7d7f06..8fc65a5 100644
--- a/cfg_checker/modules/reclass/validator.py
+++ b/cfg_checker/modules/reclass/validator.py
@@ -2,6 +2,7 @@
from cfg_checker.common import logger_cli
+
def basic_model_validation_by_path(path):
logger_cli.debug("\t...validating '{}' as a model".format(path))
_checks = []
@@ -20,7 +21,7 @@
_has_nodes = os.path.isdir(os.path.join(path, "nodes"))
logger_cli.debug("\t- has nodes? -> {}".format(_has_nodes))
_checks.append(_has_nodes)
-
+
logger_cli.debug("\t-> {}".format(
all(_checks)
))
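Reviewer note on the validation pattern in validator.py: each structural check is an independent boolean appended to a list, and the model is valid only if `all()` of them pass. A condensed sketch; the hunk only shows the "nodes" folder check, so the "classes" check here is assumed from the usual reclass layout, and the sample path is illustrative.

```python
import os

def basic_model_validation_by_path(path):
    # each check is independent; the model is valid only if all pass
    checks = [
        os.path.isdir(os.path.join(path, "classes")),  # assumed check
        os.path.isdir(os.path.join(path, "nodes")),    # shown in the diff
    ]
    return all(checks)

print(basic_model_validation_by_path("/srv/salt/reclass"))
```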