| import abc |
| import os |
| import time |
| |
| from cfg_checker.common import const |
| from cfg_checker.common import logger_cli |
| from cfg_checker.nodes import salt_master |
| |
| import jinja2 |
| |
| import six |
| |
# Package root: two levels up from the directory holding this module.
pkg_dir = os.path.normpath(
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
| |
# % threshold values used for warn/fail highlighting in reports
# disk usage %: warn when above 80, fail when above 90
_disk_warn = 80
_disk_critical = 90
# available RAM %: warn when below 5, fail when below 3
_ram_warn = 5
_ram_critical = 3
| |
| |
def line_breaks(text):
    """Convert python newlines in *text* to HTML <br /> tags."""
    return "<br />".join(text.split("\n"))
| |
| |
def get_sorted_keys(td):
    """Return the keys of *td* in display order.

    When every entry has a non-empty 'desc', sort by
    (desc.section, desc.app, key); otherwise fall back to a plain
    key sort.
    """
    # detect if we can sort by desc
    # Yes, this is slow, but bullet-proof from empty desc
    if not all(_v['desc'] for _v in td.values()):
        return sorted(td.keys())
    return sorted(
        td.keys(),
        key=lambda k: (
            td[k]['desc']['section'],
            td[k]['desc']['app'],
            k
        )
    )
| |
| |
def get_max(_list):
    """Return the largest element of *_list*.

    Uses the builtin O(n) max() instead of sorting the whole list
    just to read its last element. Raises ValueError on empty input.
    """
    return max(_list)
| |
| |
def make_pkg_action_label(act):
    """Human-readable label for a package action constant."""
    _labels = {
        const.ACT_NA: "",
        const.ACT_UPGRADE: "Upgrade possible",
        const.ACT_NEED_UP: "Needs upgrade",
        const.ACT_NEED_DOWN: "Needs downgrade",
        const.ACT_REPO: "Repo update"
    }
    return _labels[act]
| |
| |
def make_pkg_action_class(act):
    """CSS class name for a package action constant."""
    _classes = {
        const.ACT_NA: "",
        const.ACT_UPGRADE: "possible",
        const.ACT_NEED_UP: "needs_up",
        const.ACT_NEED_DOWN: "needs_down",
        const.ACT_REPO: "needs_repo"
    }
    return _classes[act]
| |
| |
def make_pkg_status_label(sts):
    """Human-readable label for a package version status constant."""
    _labels = {
        const.VERSION_NA: "N/A",
        const.VERSION_OK: "OK",
        const.VERSION_UP: "Upgraded",
        const.VERSION_DOWN: "Downgraded",
        const.VERSION_ERR: "ERROR"
    }
    return _labels[sts]
| |
| |
def make_pkg_status_class(sts):
    """CSS class for a package status, via the const mapping."""
    _status_classes = const.all_pkg_statuses
    return _status_classes[sts]
| |
| |
def make_node_status(sts):
    """CSS class for a node status, via the const mapping."""
    _node_classes = const.node_status
    return _node_classes[sts]
| |
| |
def make_repo_info(repos):
    """Build an HTML fragment describing each repo, one per line.

    Each repo renders as
    "<tag>: <subset> <release> <ubuntu-release> <type> <arch>, <maintainer><br />"
    with the maintainer's "<email>" part stripped and any non-ASCII
    characters replaced by XML character references.

    Fixes over the previous version:
    - a maintainer string without '<' is kept whole (the old blind
      slice with find() == -1 chopped its last two characters);
    - the escaped maintainer is decoded back to text so the
      concatenation also works on python 3;
    - output is assembled with join() instead of quadratic '+='.
    """
    _lines = []
    for r in repos:
        # repo header: subset/release/ubuntu-release/type/arch
        _header = " ".join([
            r['subset'],
            r['release'],
            r['ubuntu-release'],
            r['type'],
            r['arch']
        ])
        # maintainer w/o email
        _m_raw = r['maintainer']
        _lt = _m_raw.find('<')
        _m = _m_raw[:_lt - 1] if _lt >= 0 else _m_raw
        _m_ascii = _m.encode('ascii', errors="xmlcharrefreplace").decode('ascii')
        _lines.append("{}: {}, {}<br />".format(r['tag'], _header, _m_ascii))
    return "".join(_lines)
| |
| |
@six.add_metaclass(abc.ABCMeta)
class _Base(object):
    """Abstract ancestor of all report generators.

    Owns a jinja2 environment; concrete reports implement
    __call__(payload) to produce the rendered output.
    """
    def __init__(self):
        # one environment per generator instance
        self.jinja2_env = self.init_jinja2_env()

    @abc.abstractmethod
    def __call__(self, payload):
        """Render a report from *payload* (subclass responsibility)."""
        pass

    @staticmethod
    def init_jinja2_env():
        """Create a jinja2 Environment rooted at the package templates dir."""
        _tpl_dir = os.path.join(pkg_dir, 'templates')
        return jinja2.Environment(
            loader=jinja2.FileSystemLoader(_tpl_dir),
            trim_blocks=True,
            lstrip_blocks=True
        )
| |
| |
class _TMPLBase(_Base):
    """Template-driven report base.

    Fills a common data skeleton, lets subclasses extend it,
    registers the jinja2 filters the templates rely on and renders
    the template named by self.tmpl.
    """
    @abc.abstractproperty
    def tmpl(self):
        # template file name; every concrete report defines it
        pass

    @staticmethod
    def _count_totals(data):
        # counters shared by all reports
        data['counters']['total_nodes'] = len(data['nodes'])

    def __call__(self, payload):
        # start from the common skeleton, then overlay the payload;
        # payload should have pre-sorted structure according to report
        # called: nodes, openstack_release, mcp_release, etc...
        data = self.common_data()
        data.update(payload)

        # report-specific additions, then the global counters
        self._extend_data(data)
        self._count_totals(data)

        # register the filters used by the templates
        self.jinja2_env.filters.update({
            'linebreaks': line_breaks,
            'get_max': get_max,
            'get_sorted_keys': get_sorted_keys,
            'pkg_status_label': make_pkg_status_label,
            'pkg_status_class': make_pkg_status_class,
            'pkg_action_label': make_pkg_action_label,
            'pkg_action_class': make_pkg_action_class,
            'node_status_class': make_node_status,
            'make_repo_info': make_repo_info
        })

        # render!
        logger_cli.info("-> Using template: {}".format(self.tmpl))
        tmpl = self.jinja2_env.get_template(self.tmpl)
        logger_cli.info("-> Rendering")
        return tmpl.render(data)

    def common_data(self):
        """Skeleton dict every report starts from."""
        return {
            'counters': {},
            'salt_info': {},
            'gen_date': time.strftime("%m/%d/%Y %H:%M:%S")
        }

    def _extend_data(self, data):
        # hook for subclasses; default adds nothing
        pass
| |
| |
# CSV Package versions report
class CSVAllPackages(_TMPLBase):
    # template producing the CSV output
    tmpl = "pkg_versions_csv.j2"
| |
| |
# HTML Package versions report
class HTMLPackageCandidates(_TMPLBase):
    # template producing the HTML output
    tmpl = "pkg_versions_html.j2"
| |
| |
# Model tree comparison report
class HTMLModelCompare(_TMPLBase):
    """Model tree comparison rendered as HTML tabs."""
    tmpl = "model_tree_cmp_tmpl.j2"

    def _extend_data(self, data):
        # keep the name list separately from the per-tab diff data
        data["names"] = data["diffs"].pop("diff_names")
        data["tabs"] = data.pop("diffs")

        # one counter per tab: how many diff entries it holds
        for _name, _content in data["tabs"].items():
            data['counters'][_name] = len(_content["diffs"])
| |
| |
class HTMLNetworkReport(_TMPLBase):
    """Network/health check report.

    Gathers per-node facts over salt (kernel, cpu/virtualization,
    RAM, disk, services) and annotates them with warn/fail classes
    for the template.
    """
    tmpl = "network_check_tmpl.j2"

    def _extend_data(self, data):
        """Populate data['nodes'] with hardware facts and mark
        data['map'] interface errors with a css class.

        Dict iteration goes through six.iteritems/six.itervalues so
        the code runs on both py2 and py3 (the previous
        .iteritems()/.itervalues() calls were py2-only).
        """
        def get_bytes(value):
            # convert 'free -h' human sizes (e.g. '15G') to bytes
            if value[-1] == 'G':
                return int(float(value[:-1]) * 1024 * 1024 * 1024)
            elif value[-1] == 'M':
                return int(float(value[:-1]) * 1024 * 1024)
            elif value[-1] == 'K':
                return int(float(value[:-1]) * 1024)
            else:
                return int(value)

        def _lscpu(field, key, _dict):
            # store the trimmed value of one lscpu field under 'key'
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "lscpu | grep -e \"^{}:\" | cut -d\":\" -f2 " \
                "| sed -e 's/^[[:space:]]*//'"
            _f_cmd(_cmd.format(field), key, target_dict=_dict)

        def _free(field, key, _dict):
            # store one column of the 'Mem' row of free -h
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "free -h | sed -n '/Mem/s/ \\+/ /gp' | cut -d\" \" -f {}"
            _f_cmd(_cmd.format(field), key, target_dict=_dict)

        def _services(_dict):
            # parse 'service --status-all' into name -> True ('+'),
            # False ('-') or None (unknown marker)
            _key = "services"
            _key_r = "services_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "service --status-all"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            for node, dt in six.iteritems(_dict):
                dt[_key] = {}
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split()
                    _status = li[1]
                    _name = li[3]
                    if _status == '-':
                        dt[_key][_name] = False
                    elif _status == '+':
                        dt[_key][_name] = True
                    else:
                        dt[_key][_name] = None
                # raw output no longer needed once parsed
                dt.pop(_key_r)

        # expose threshold values to the template
        data["const"] = {
            "ram_warn": _ram_warn,
            "ram_critical": _ram_critical,
            "disk_warn": _disk_warn,
            "disk_critical": _disk_critical
        }

        # get kernel version
        salt_master.get_cmd_for_nodes(
            "uname -r",
            "kernel",
            target_dict=data["nodes"]
        )
        # cpu info
        # Sample: VT-x, KVM, full
        _lscpu("Virtualization", "virt_mode", data["nodes"])
        _lscpu("Hypervisor vendor", "virt_vendor", data["nodes"])
        _lscpu("Virtualization type", "virt_type", data["nodes"])
        # sample: 4
        _lscpu("CPU(s)", "cpus", data["nodes"])

        # free ram
        # sample: 16425392 14883144 220196
        _free("2", "ram_total", data["nodes"])
        _free("3", "ram_used", data["nodes"])
        _free("4", "ram_free", data["nodes"])
        _free("7", "ram_available", data["nodes"])
        for _data in six.itervalues(data["nodes"]):
            _total = get_bytes(_data["ram_total"])
            _avail = get_bytes(_data["ram_available"])
            # percent of RAM still available; lower is worse
            _m = _avail * 100.0 / _total
            if _m < _ram_critical:
                _data["ram_status"] = "fail"
            elif _m < _ram_warn:
                _data["ram_status"] = "warn"
            else:
                _data["ram_status"] = ""

        # disk space
        # sample: /dev/vda1 78G 33G 45G 43%
        salt_master.get_cmd_for_nodes(
            "df -h | sed -n '/^\\/dev/s/ \\+/ /gp' | cut -d\" \" -f 1-5",
            "disk_raw",
            target_dict=data["nodes"]
        )
        for _data in six.itervalues(data["nodes"]):
            _data["disk"] = {}
            # show first device row by default
            _data["disk_max_dev"] = None
            _d = _data["disk"]
            _r = _data["disk_raw"]
            _r = _r.splitlines()
            _max = -1
            for idx in range(0, len(_r)):
                _t = _r[idx].split()
                _d[_t[0]] = {}
                _d[_t[0]]['v'] = _t[1:]
                # usage percent, e.g. '43%' -> 43
                _chk = int(_t[-1].split('%')[0])
                if _chk > _max:
                    # remember the fullest device for the summary row
                    _data["disk_max_dev"] = _t[0]
                    _max = _chk
                if _chk > _disk_critical:
                    _d[_t[0]]['f'] = "fail"
                elif _chk > _disk_warn:
                    _d[_t[0]]['f'] = "warn"
                else:
                    _d[_t[0]]['f'] = ""

        # prepare networks data for report: mark errors with css class
        for net, net_v in six.iteritems(data['map']):
            for node, ifs in six.iteritems(net_v):
                for d in ifs:
                    _err = "fail"
                    d['interface_error'] = _err if d['interface_error'] else ""
                    d['mtu_error'] = _err if d['mtu_error'] else ""
                    d['status_error'] = _err if d['status_error'] else ""
                    d['subnet_gateway_error'] = \
                        _err if d['subnet_gateway_error'] else ""

        _services(data["nodes"])
| |
| |
class ReportToFile(object):
    """Run a report generator and persist its output.

    *target* is either a filename (opened for text writing) or any
    object exposing a write() method.
    """
    def __init__(self, report, target):
        self.report = report
        self.target = target

    def __call__(self, payload):
        _rendered = self.report(payload)

        if isinstance(self.target, six.string_types):
            # got a path: open the file ourselves
            self._wrapped_dump(_rendered)
        else:
            # got a writable object
            self._dump(_rendered, self.target)

    def _wrapped_dump(self, payload):
        # open-and-write helper for path targets
        with open(self.target, 'wt') as _file:
            self._dump(payload, _file)

    @staticmethod
    def _dump(payload, target):
        target.write(payload)