import abc
import os
import re
import time

from cfg_checker.common import const
from cfg_checker.common import logger_cli
from cfg_checker.common.file_utils import read_file_as_lines
from cfg_checker.nodes import salt_master

import jinja2

import six

pkg_dir = os.path.dirname(__file__)
pkg_dir = os.path.join(pkg_dir, os.pardir, os.pardir)
pkg_dir = os.path.normpath(pkg_dir)

# % threshold values
_disk_warn = 80
_disk_critical = 90
_ram_warn = 5
_ram_critical = 3
_softnet_interval = 5

UP = const.NODE_UP
DOWN = const.NODE_DOWN


def line_breaks(text):
    # replace python linebreaks with html breaks
    return text.replace("\n", "<br />")


def get_sorted_keys(td):
    # detect if we can sort by desc
    # Yes, this is slow, but bullet-proof against an empty desc
    _desc = all([bool(td[k]['desc']) for k in td.keys()])
    # Get sorted list
    if not _desc:
        return sorted(td.keys())
    else:
        return sorted(
            td.keys(),
            key=lambda k: (
                td[k]['desc']['section'],
                td[k]['desc']['app'],
                k
            )
        )


def get_max(_list):
    return sorted(_list)[-1]


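# The helpers below are registered as Jinja2 filters and map const
# values to the labels and CSS classes used by the report templates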
def make_pkg_action_label(act):
    _act_labels = {
        const.ACT_UPGRADE: "Upgrade possible",
        const.ACT_NEED_UP: "Needs upgrade",
        const.ACT_NEED_DOWN: "Needs downgrade",
        const.ACT_REPO: "Repo update",
        const.ACT_NA: ""
    }
    return _act_labels[act]


def make_pkg_action_class(act):
    _act_classes = {
        const.ACT_UPGRADE: "possible",
        const.ACT_NEED_UP: "needs_up",
        const.ACT_NEED_DOWN: "needs_down",
        const.ACT_REPO: "needs_repo",
        const.ACT_NA: ""
    }
    return _act_classes[act]


def make_pkg_status_label(sts):
    _status_labels = {
        const.VERSION_OK: "OK",
        const.VERSION_UP: "Upgraded",
        const.VERSION_DOWN: "Downgraded",
        const.VERSION_ERR: "ERROR",
        const.VERSION_NA: "N/A"
    }
    return _status_labels[sts]


def make_pkg_status_class(sts):
    return const.all_pkg_statuses[sts]


def make_node_status(sts):
    return const.node_status[sts]


def make_repo_info(repos):
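    # build an HTML snippet per repo: tag, header fields and the
    # maintainer name with the e-mail part stripped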
    _text = ""
    for r in repos:
        # tag
        _text += r['tag'] + ": "
        # repo header
        _text += " ".join([
            r['subset'],
            r['release'],
            r['ubuntu-release'],
            r['type'],
            r['arch']
        ]) + ", "
        # maintainer w/o email
        _m = r['maintainer'][:r['maintainer'].find('<')-1]
        _m_ascii = _m.encode('ascii', errors="xmlcharrefreplace")
        _text += _m_ascii
        # newline
        _text += "<br />"
    return _text


@six.add_metaclass(abc.ABCMeta)
class _Base(object):
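    """Abstract base for report generators; owns the shared Jinja2 env."""
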
    def __init__(self):
        self.jinja2_env = self.init_jinja2_env()

    @abc.abstractmethod
    def __call__(self, payload):
        pass

    @staticmethod
    def init_jinja2_env():
        return jinja2.Environment(
            loader=jinja2.FileSystemLoader(os.path.join(pkg_dir, 'templates')),
            trim_blocks=True,
            lstrip_blocks=True)


class _TMPLBase(_Base):
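    """Template-driven report: merges the payload into the common data,
    registers the Jinja2 filters and renders the template named by `tmpl`.
    """
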
    @abc.abstractproperty
    def tmpl(self):
        pass

    @staticmethod
    def _count_totals(data):
        data['counters']['total_nodes'] = len(data['nodes'])

    def __call__(self, payload):
        # init data structures
        data = self.common_data()
        # payload is expected to be pre-sorted for the report being built:
        # nodes, openstack_release, mcp_release, etc...
        data.update(payload)

        # add template specific data
        self._extend_data(data)

        # compute global counters
        self._count_totals(data)

        # specific filters
        self.jinja2_env.filters['linebreaks'] = line_breaks
        self.jinja2_env.filters['get_max'] = get_max

        self.jinja2_env.filters['get_sorted_keys'] = get_sorted_keys
        self.jinja2_env.filters['pkg_status_label'] = make_pkg_status_label
        self.jinja2_env.filters['pkg_status_class'] = make_pkg_status_class
        self.jinja2_env.filters['pkg_action_label'] = make_pkg_action_label
        self.jinja2_env.filters['pkg_action_class'] = make_pkg_action_class
        self.jinja2_env.filters['node_status_class'] = make_node_status
        self.jinja2_env.filters['pkg_repo_info'] = make_repo_info

        # render!
        logger_cli.info("-> Using template: {}".format(self.tmpl))
        tmpl = self.jinja2_env.get_template(self.tmpl)
        logger_cli.info("-> Rendering")
        return tmpl.render(data)

    def common_data(self):
        return {
            'counters': {},
            'salt_info': {},
            'gen_date': time.strftime("%m/%d/%Y %H:%M:%S")
        }

    def _extend_data(self, data):
        pass


# CSV Package versions report
class CSVAllPackages(_TMPLBase):
    tmpl = "pkg_versions_csv.j2"


# HTML Package versions report
class HTMLPackageCandidates(_TMPLBase):
    tmpl = "pkg_versions_html.j2"


# Model tree comparison report
class HTMLModelCompare(_TMPLBase):
    tmpl = "model_tree_cmp_tmpl.j2"

    def _extend_data(self, data):
        # move names into separate place
        data["names"] = data["diffs"].pop("diff_names")
        data["tabs"] = data.pop("diffs")

        # counters - mdl_diff
        for _tab in data["tabs"].keys():
            data['counters'][_tab] = len(data["tabs"][_tab]["diffs"].keys())


class HTMLNetworkReport(_TMPLBase):
    tmpl = "network_check_tmpl.j2"

    def _extend_data(self, data):
        def get_bytes(value):
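            # convert sizes with a K/M/G suffix (as printed by `free -h`)
            # into plain bytes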
            if value[-1] == 'G':
                return int(float(value[:-1]) * 1024 * 1024 * 1024)
            elif value[-1] == 'M':
                return int(float(value[:-1]) * 1024 * 1024)
            elif value[-1] == 'K':
                return int(float(value[:-1]) * 1024)
            else:
                return int(value)

        def _lscpu(_dict):
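            # run `lscpu` on all nodes, parse the "key: value" lines into
            # a dict and mark each node as virtual or physical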
            _key = "lscpu"
            _key_r = "lscpu_raw"
            # get all of the values
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "lscpu | sed -n '/\\:/s/ \\+/ /gp'"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            # parse them and put into dict
            for node, dt in _dict.iteritems():
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split(':')
                    _var_name = li[0].lower()
                    _var_name = re.sub(' ', '_', _var_name)
                    _var_name = re.sub('|'.join(['\\(', '\\)']), '', _var_name)
                    _var_value = li[1].strip()
                    dt[_key][_var_name] = _var_value
                dt.pop(_key_r)
                # detect virtual nodes
                if "hypervisor_vendor" in dt[_key]:
                    dt['node_type'] = "virtual"
                else:
                    dt['node_type'] = "physical"

        def _free(_dict):
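            # collect `free -h` per node and set a warn/fail status when
            # the available RAM percentage drops below the thresholds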
            _key = "ram"
            _key_r = "ram_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "free -h | sed -n '/Mem/s/ \\+/ /gp'"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            # parse them and put into dict
            for node, dt in _dict.iteritems():
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                li = dt[_key_r].split()
                dt[_key]['total'] = li[1]
                dt[_key]['used'] = li[2]
                dt[_key]['free'] = li[3]
                dt[_key]['shared'] = li[4]
                dt[_key]['cache'] = li[5]
                dt[_key]['available'] = li[6]

                _total = get_bytes(li[1])
                _avail = get_bytes(li[6])
                _m = _avail * 100.0 / _total
                if _m < _ram_critical:
                    dt[_key]["status"] = "fail"
                elif _m < _ram_warn:
                    dt[_key]["status"] = "warn"
                else:
                    dt[_key]["status"] = ""

        def _services(_dict):
            _key = "services"
            _key_r = "services_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "service --status-all"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            for node, dt in _dict.iteritems():
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split()
                    _status = li[1]
                    _name = li[3]
                    if _status == '-':
                        dt[_key][_name] = False
                    elif _status == '+':
                        dt[_key][_name] = True
                    else:
                        dt[_key][_name] = None
                dt.pop(_key_r)

        def _vcp_status(_dict):
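            # query `virsh list --all` on kvm* nodes and record the id
            # and state of each VCP virtual machine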
            _key = "virsh"
            _key_r = "virsh_raw"
            salt_master.get_cmd_for_nodes(
                "virsh list --all | sed -n -e '/[0-9]/s/ \\+/ /gp'",
                _key_r,
                target_dict=_dict,
                nodes="kvm*"
            )
            _kvm = filter(lambda x: x.find("kvm") >= 0, _dict.keys())
            for node in _kvm:
                dt = _dict[node]
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split()
                    _id = li[0]
                    _name = li[1]
                    _status = li[2]
                    dt[_key][_name] = {
                        'id': _id,
                        'status': _status
                    }
                dt.pop(_key_r)

        # query per-cpu and count totals
        # total (0), dropped(1), squeezed (2), collision (7)
        def _soft_net_stats(_dict):
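            # take two /proc/net/softnet_stat snapshots _softnet_interval
            # seconds apart and report the per-CPU deltas as per-second rates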
            _key = "net_stats"
            _key_r = "net_stats_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "cat /proc/net/softnet_stat; echo \\#; " \
                   "sleep {}; cat /proc/net/softnet_stat".format(
                       _softnet_interval
                   )
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            for node, dt in _dict.iteritems():
                _cpuindex = 1
                _add_mode = True
                # final totals
                dt[_key] = {
                    "total": [0, 0, 0, 0]
                }
                # totals for start mark
                _ts = [0, 0, 0, 0]
                # skip if node is down
                if dt['status'] == DOWN:
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    if line.startswith("#"):
                        _add_mode = False
                        _cpuindex = 1
                        continue
                    li = line.split()
                    _c = [
                        int(li[0], 16),
                        int(li[1], 16),
                        int(li[2], 16),
                        int(li[7], 16)
                    ]
                    _id = "cpu{:02}".format(_cpuindex)
                    if _id not in dt[_key]:
                        dt[_key][_id] = []
                    _dc = dt[_key][_id]
                    if _add_mode:
                        # saving values and adding totals
                        dt[_key][_id] = _c
                        # save start totals
                        _ts = [_ts[i]+_c[i] for i in range(0, len(_c))]
                    else:
                        # this is the second measurement,
                        # subtract all values
                        for i in range(len(_c)):
                            dt[_key][_id][i] = _c[i] - _dc[i]
                            dt[_key]["total"][i] += _c[i]
                    _cpuindex += 1
                # finally, subtract initial totals and convert the
                # counters to per-second rates
                _ival = float(_softnet_interval)
                for k, v in dt[_key].iteritems():
                    if k != "total":
                        dt[_key][k] = [v[i] / _ival for i in range(len(v))]
                    else:
                        dt[_key][k] = [
                            (v[i] - _ts[i]) / _ival for i in range(len(v))
                        ]
                dt.pop(_key_r)

        # prepare yellow and red marker values
        data["const"] = {
            "net_interval": _softnet_interval,
            "ram_warn": _ram_warn,
            "ram_critical": _ram_critical,
            "disk_warn": _disk_warn,
            "disk_critical": _disk_critical,
            "services": read_file_as_lines(
                os.path.join(
                    pkg_dir,
                    'etc',
                    'services.list'
                )
            )
        }

        # get kernel version
        salt_master.get_cmd_for_nodes(
            "uname -r",
            "kernel",
            target_dict=data["nodes"]
        )
        # process lscpu data
        _lscpu(data["nodes"])

        # free ram
        # sample: 16425392 14883144 220196
        _free(data["nodes"])

        # disk space
        # sample: /dev/vda1 78G 33G 45G 43%
        salt_master.get_cmd_for_nodes(
            "df -h | sed -n '/^\\/dev/s/ \\+/ /gp' | cut -d\" \" -f 1-5",
            "disk_raw",
            target_dict=data["nodes"]
        )
        for _data in data["nodes"].itervalues():
            _data["disk"] = {}
            # show first device row by default
            _data["disk_max_dev"] = None
            _d = _data["disk"]
            _r = _data["disk_raw"]
            _r = _r.splitlines()
            _max = -1
            for idx in range(0, len(_r)):
                _t = _r[idx].split()
                _d[_t[0]] = {}
                _d[_t[0]]['v'] = _t[1:]
                _chk = int(_t[-1].split('%')[0])
                if _chk > _max:
                    _data["disk_max_dev"] = _t[0]
                    _max = _chk
                if _chk > _disk_critical:
                    _d[_t[0]]['f'] = "fail"
                elif _chk > _disk_warn:
                    _d[_t[0]]['f'] = "warn"
                else:
                    _d[_t[0]]['f'] = ""

        # prepare networks data for report
        for net, net_v in data['map'].iteritems():
            for node, ifs in net_v.iteritems():
                for d in ifs:
                    _err = "fail"
                    d['interface_error'] = _err if d['interface_error'] else ""
                    d['mtu_error'] = _err if d['mtu_error'] else ""
                    d['status_error'] = _err if d['status_error'] else ""
                    d['subnet_gateway_error'] = \
                        _err if d['subnet_gateway_error'] else ""

        _services(data["nodes"])
        # vcp status
        # query virsh and prepare for report
        _vcp_status(data["nodes"])

        # soft net stats
        _soft_net_stats(data["nodes"])


class ReportToFile(object):
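    """Render a report and write the result to a filename
    or an already opened file-like target.
    """
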
    def __init__(self, report, target):
        self.report = report
        self.target = target

    def __call__(self, payload):
        payload = self.report(payload)

        if isinstance(self.target, six.string_types):
            self._wrapped_dump(payload)
        else:
            self._dump(payload, self.target)

    def _wrapped_dump(self, payload):
        with open(self.target, 'wt') as target:
            self._dump(payload, target)

    @staticmethod
    def _dump(payload, target):
        target.write(payload)