import abc
import os
import re
import time

from cfg_checker.common import const
from cfg_checker.common import logger_cli
from cfg_checker.common.file_utils import read_file_as_lines
from cfg_checker.nodes import salt_master

import jinja2

import six

pkg_dir = os.path.dirname(__file__)
pkg_dir = os.path.join(pkg_dir, os.pardir, os.pardir)
pkg_dir = os.path.normpath(pkg_dir)

# % threshold values
_disk_warn = 80
_disk_critical = 90
_ram_warn = 5
_ram_critical = 3
_softnet_interval = 5
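# Threshold semantics (see the checks below): disk values are "percent used"
# (warn above _disk_warn, fail above _disk_critical), RAM values are
# "percent available" (warn below _ram_warn, fail below _ram_critical),
# and _softnet_interval is the pause, in seconds, between the two
# /proc/net/softnet_stat samples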

UP = const.NODE_UP
DOWN = const.NODE_DOWN


def line_breaks(text):
    # replace python linebreaks with html breaks
    return text.replace("\n", "<br />")


def get_sorted_keys(td):
    # detect if we can sort by desc
    # Yes, this is slow, but bullet-proof from empty desc
    _desc = all([bool(td[k]['desc']) for k in td.keys()])
    # Get sorted list
    if not _desc:
        return sorted(td.keys())
    else:
        return sorted(
            td.keys(),
            key=lambda k: (
                td[k]['desc']['section'],
                td[k]['desc']['app'],
                k
            )
        )


def get_max(_list):
    return sorted(_list)[-1]


def make_pkg_action_label(act):
    _act_labels = {
        const.ACT_UPGRADE: "Upgrade possible",
        const.ACT_NEED_UP: "Needs upgrade",
        const.ACT_NEED_DOWN: "Needs downgrade",
        const.ACT_REPO: "Repo update",
        const.ACT_NA: ""
    }
    return _act_labels[act]


def make_pkg_action_class(act):
    _act_classes = {
        const.ACT_UPGRADE: "possible",
        const.ACT_NEED_UP: "needs_up",
        const.ACT_NEED_DOWN: "needs_down",
        const.ACT_REPO: "needs_repo",
        const.ACT_NA: ""
    }
    return _act_classes[act]


def make_pkg_status_label(sts):
    _status_labels = {
        const.VERSION_OK: "OK",
        const.VERSION_UP: "Upgraded",
        const.VERSION_DOWN: "Downgraded",
        const.VERSION_WARN: "WARNING",
        const.VERSION_ERR: "ERROR",
        const.VERSION_NA: "N/A"
    }
    return _status_labels[sts]


def make_pkg_status_class(sts):
    return const.all_pkg_statuses[sts]


def make_node_status(sts):
    return const.node_status[sts]


def make_repo_info(repos):
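    # Builds one "<br />"-terminated HTML line per repo, roughly:
    #   "<tag>: <subset> <release> <ubuntu-release> <type> <arch>, <maintainer>"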

    _text = ""
    for r in repos:
        # tag
        _text += r['tag'] + ": "
        # repo header
        _text += " ".join([
            r['subset'],
            r['release'],
            r['ubuntu-release'],
            r['type'],
            r['arch']
        ]) + ", "
        # maintainer w/o email; encode() yields bytes on py3,
        # so decode back to text before concatenating
        _m = r['maintainer'][:r['maintainer'].find('<')-1]
        _m_ascii = _m.encode('ascii', errors="xmlcharrefreplace").decode()
        _text += _m_ascii
        # newline
        _text += "<br />"
    return _text


@six.add_metaclass(abc.ABCMeta)
class _Base(object):
    def __init__(self):
        self.jinja2_env = self.init_jinja2_env()

    @abc.abstractmethod
    def __call__(self, payload):
        pass

    @staticmethod
    def init_jinja2_env():
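        # NOTE: templates are resolved from <pkg_dir>/templates, i.e. two
        # directories above this module (see pkg_dir at the top of the file)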

        return jinja2.Environment(
            loader=jinja2.FileSystemLoader(os.path.join(pkg_dir, 'templates')),
            trim_blocks=True,
            lstrip_blocks=True)


class _TMPLBase(_Base):
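    """Common rendering flow for template-based reports.

    Subclasses point `tmpl` at a Jinja2 template and may override
    _extend_data() to enrich the payload before rendering, e.g.
    (hypothetical subclass and template name):

        class HTMLExampleReport(_TMPLBase):
            tmpl = "example_tmpl.j2"

            def _extend_data(self, data):
                data["extra"] = "value"
    """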

    @abc.abstractproperty
    def tmpl(self):
        pass

    @staticmethod
    def _count_totals(data):
        data['counters']['total_nodes'] = len(data['nodes'])

    def __call__(self, payload):
        # init data structures
        data = self.common_data()
        # payload should have pre-sorted structure according to report called
        # nodes, openstack_release, mcp_release, etc...
        data.update(payload)

        # add template specific data
        self._extend_data(data)

        # do counts global
        self._count_totals(data)

        # specific filters
        self.jinja2_env.filters['linebreaks'] = line_breaks
        self.jinja2_env.filters['get_max'] = get_max

        self.jinja2_env.filters['get_sorted_keys'] = get_sorted_keys
        self.jinja2_env.filters['pkg_status_label'] = make_pkg_status_label
        self.jinja2_env.filters['pkg_status_class'] = make_pkg_status_class
        self.jinja2_env.filters['pkg_action_label'] = make_pkg_action_label
        self.jinja2_env.filters['pkg_action_class'] = make_pkg_action_class
        self.jinja2_env.filters['node_status_class'] = make_node_status
        self.jinja2_env.filters['pkg_repo_info'] = make_repo_info

        # render!
        logger_cli.info("-> Using template: {}".format(self.tmpl))
        tmpl = self.jinja2_env.get_template(self.tmpl)
        logger_cli.info("-> Rendering")
        return tmpl.render(data)

    def common_data(self):
        return {
            'counters': {},
            'salt_info': {},
            'gen_date': time.strftime("%m/%d/%Y %H:%M:%S")
        }

    def _extend_data(self, data):
        pass


# CSV Package versions report
class CSVAllPackages(_TMPLBase):
    tmpl = "pkg_versions_csv.j2"


# HTML Package versions report
class HTMLPackageCandidates(_TMPLBase):
    tmpl = "pkg_versions_html.j2"


# Model comparison report
class HTMLModelCompare(_TMPLBase):
    tmpl = "model_tree_cmp_tmpl.j2"

    def _extend_data(self, data):
        # move names into separate place
        data["names"] = data["diffs"].pop("diff_names")
        data["tabs"] = data.pop("diffs")

        # counters - mdl_diff
        for _tab in data["tabs"].keys():
            data['counters'][_tab] = len(data["tabs"][_tab]["diffs"].keys())


class HTMLNetworkReport(_TMPLBase):
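    """Network check report.

    Besides the payload itself, _extend_data() collects per-node facts
    via salt: kernel version, lscpu, free RAM, disk usage, service
    states, virsh domains (kvm* nodes only) and /proc/net/softnet_stat
    deltas sampled over _softnet_interval seconds.
    """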

    tmpl = "network_check_tmpl.j2"

    def _extend_data(self, data):
        def get_bytes(value):
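            # convert `free -h` human-readable sizes to bytes,
            # e.g. "1.5G" -> 1610612736, "512M" -> 536870912, "42" -> 42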

            if value[-1] == 'G':
                return int(float(value[:-1]) * 1024 * 1024 * 1024)
            elif value[-1] == 'M':
                return int(float(value[:-1]) * 1024 * 1024)
            elif value[-1] == 'K':
                return int(float(value[:-1]) * 1024)
            else:
                return int(value)

        def _dmidecode(_dict, type=0):
            _key = "dmi"
            _key_r = "dmi_r"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "dmidecode -t {}".format(type)
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            # TODO: parse BIOS output or given type
            pass

        def _lsblk(_dict):
            _key = "lsblk"
            _key_r = "lsblk_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _columns = [
                "NAME",
                "HCTL",
                "TYPE",
                "SIZE",
                "VENDOR",
                "MODEL",
                "SERIAL",
                "REV",
                "TRAN"
            ]
            _cmd = "lsblk -S --output {}".format(",".join(_columns))
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            # TODO: parse lsblk output
            pass

        def _lscpu(_dict):
            _key = "lscpu"
            _key_r = "lscpu_raw"
            # get all of the values
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "lscpu | sed -n '/\\:/s/ \\+/ /gp'"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            # parse them and put into dict
            for node, dt in _dict.items():
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                if not dt[_key_r]:
                    # no stats collected, put negatives
                    dt.pop(_key_r)
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split(':')
                    _var_name = li[0].lower()
                    _var_name = re.sub(' ', '_', _var_name)
                    _var_name = re.sub('|'.join(['\\(', '\\)']), '', _var_name)
                    _var_value = li[1].strip()
                    dt[_key][_var_name] = _var_value
                dt.pop(_key_r)
                # detect virtual nodes
                if "hypervisor_vendor" in dt[_key]:
                    dt['node_type'] = "virtual"
                else:
                    dt['node_type'] = "physical"

        def _free(_dict):
            _key = "ram"
            _key_r = "ram_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "free -h | sed -n '/Mem/s/ \\+/ /gp'"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            # parse them and put into dict
            for node, dt in _dict.items():
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                if not dt[_key_r]:
                    # no stats collected, put negatives
                    dt.pop(_key_r)
                    continue
                li = dt[_key_r].split()
                dt[_key]['total'] = li[1]
                dt[_key]['used'] = li[2]
                dt[_key]['free'] = li[3]
                dt[_key]['shared'] = li[4]
                dt[_key]['cache'] = li[5]
                dt[_key]['available'] = li[6]

                _total = get_bytes(li[1])
                _avail = get_bytes(li[6])
                _m = _avail * 100.0 / _total
                if _m < _ram_critical:
                    dt[_key]["status"] = "fail"
                elif _m < _ram_warn:
                    dt[_key]["status"] = "warn"
                else:
                    dt[_key]["status"] = ""

        def _services(_dict):
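            # `service --status-all` marks each service with '+' (running),
            # '-' (stopped) or '?' (unknown); these map to True/False/None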

            _key = "services"
            _key_r = "services_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "service --status-all"
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            for node, dt in _dict.items():
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                if not dt[_key_r]:
                    # no stats collected, put negatives
                    dt.pop(_key_r)
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split()
                    _status = li[1]
                    _name = li[3]
                    if _status == '-':
                        dt[_key][_name] = False
                    elif _status == '+':
                        dt[_key][_name] = True
                    else:
                        dt[_key][_name] = None
                dt.pop(_key_r)

        def _vcp_status(_dict):
            _key = "virsh"
            _key_r = "virsh_raw"
            salt_master.get_cmd_for_nodes(
                "virsh list --all | sed -n -e '/[0-9]/s/ \\+/ /gp'",
                _key_r,
                target_dict=_dict,
                nodes="kvm*"
            )
            _kvm = filter(lambda x: x.find("kvm") >= 0, _dict.keys())
            for node in _kvm:
                dt = _dict[node]
                dt[_key] = {}
                if dt['status'] == DOWN:
                    continue
                if not dt[_key_r]:
                    # no stats collected, put negatives
                    dt.pop(_key_r)
                    continue
                lines = dt[_key_r].splitlines()
                for line in lines:
                    li = line.split()
                    _id = li[0]
                    _name = li[1]
                    _status = li[2]
                    dt[_key][_name] = {
                        'id': _id,
                        'status': _status
                    }
                dt.pop(_key_r)

        # query per-cpu and count totals
        # total (0), dropped(1), squeezed (2), collision (7)
        def _soft_net_stats(_dict):
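            # /proc/net/softnet_stat holds hexadecimal counters, one row per
            # CPU; two snapshots are taken _softnet_interval seconds apart
            # and stored as per-second deltas plus a per-node total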

            _key = "net_stats"
            _key_r = "net_stats_raw"
            _f_cmd = salt_master.get_cmd_for_nodes
            _cmd = "cat /proc/net/softnet_stat; echo \\#; " \
                "sleep {}; cat /proc/net/softnet_stat".format(
                    _softnet_interval
                )
            _f_cmd(_cmd, _key_r, target_dict=_dict)
            for node, dt in _dict.items():
                _cpuindex = 1
                _add_mode = True
                # totals for start mark
                _ts = [0, 0, 0, 0]
                # skip if node is down
                if dt['status'] == DOWN:
                    dt[_key] = {
                        "total": [-1, -1, -1, -1]
                    }
                    continue
                if not dt[_key_r]:
                    # no stats collected, put negatives
                    dt.pop(_key_r)
                    dt[_key] = {
                        "total": [-1, -1, -1, -1]
                    }
                    continue
                # final totals
                dt[_key] = {
                    "total": [0, 0, 0, 0]
                }
                lines = dt[_key_r].splitlines()
                for line in lines:
                    if line.startswith("#"):
                        _add_mode = False
                        _cpuindex = 1
                        continue
                    li = line.split()
                    _c = [
                        int(li[0], 16),
                        int(li[1], 16),
                        int(li[2], 16),
                        int(li[7], 16)
                    ]
                    _id = "cpu{:02}".format(_cpuindex)
                    if _id not in dt[_key]:
                        dt[_key][_id] = []
                    _dc = dt[_key][_id]
                    if _add_mode:
                        # saving values and adding totals
                        dt[_key][_id] = _c
                        # save start totals
                        _ts = [_ts[i]+_c[i] for i in range(0, len(_c))]
                    else:
                        # this is second measurement
                        # subtract all values
                        for i in range(len(_c)):
                            dt[_key][_id][i] = _c[i] - _dc[i]
                            dt[_key]["total"][i] += _c[i]
                    _cpuindex += 1
                # finally, normalize to per-second rates over the sampling
                # interval; for the totals, subtract the first-pass sums first
                for k, v in dt[_key].items():
                    if k != "total":
                        dt[_key][k] = [
                            v[i] / float(_softnet_interval)
                            for i in range(len(v))
                        ]
                    else:
                        dt[_key][k] = [
                            (v[i] - _ts[i]) / float(_softnet_interval)
                            for i in range(len(v))
                        ]
                dt.pop(_key_r)

        # prepare yellow and red marker values
        data["const"] = {
            "net_interval": _softnet_interval,
            "ram_warn": _ram_warn,
            "ram_critical": _ram_critical,
            "disk_warn": _disk_warn,
            "disk_critical": _disk_critical,
            "services": read_file_as_lines(
                os.path.join(
                    pkg_dir,
                    'etc',
                    'services.list'
                )
            )
        }

        # get kernel version
        salt_master.get_cmd_for_nodes(
            "uname -r",
            "kernel",
            target_dict=data["nodes"]
        )
        # process lscpu data
        _lscpu(data["nodes"])

        # free ram
        # sample: 16425392 14883144 220196
        _free(data["nodes"])

        # disk space
        # sample: /dev/vda1 78G 33G 45G 43%
        _key = "disk"
        _key_r = "disk_raw"
        salt_master.get_cmd_for_nodes(
            "df -h | sed -n '/^\\/dev/s/ \\+/ /gp' | cut -d\" \" -f 1-5",
            "disk_raw",
            target_dict=data["nodes"]
        )
        for dt in data["nodes"].values():
            dt["disk"] = {}
            dt["disk_max_dev"] = None
            if dt['status'] == DOWN:
                dt["disk"]["unknown"] = {}
                dt["disk_max_dev"] = "unknown"
                continue
            if not dt[_key_r]:
                # no stats collected, put negatives
                dt.pop(_key_r)
                dt[_key] = {}
                continue
            # show first device row by default
            _d = dt["disk"]
            _r = dt["disk_raw"]
            _r = _r.splitlines()
            _max = -1
            for idx in range(0, len(_r)):
                _t = _r[idx].split()
                _d[_t[0]] = {}
                _d[_t[0]]['v'] = _t[1:]
                _chk = int(_t[-1].split('%')[0])
                if _chk > _max:
                    dt["disk_max_dev"] = _t[0]
                    _max = _chk
                if _chk > _disk_critical:
                    _d[_t[0]]['f'] = "fail"
                elif _chk > _disk_warn:
                    _d[_t[0]]['f'] = "warn"
                else:
                    _d[_t[0]]['f'] = ""

        # prepare networks data for report
        for net, net_v in data['map'].items():
            for node, ifs in net_v.items():
                for d in ifs:
                    _err = "fail"
                    d['interface_error'] = _err if d['interface_error'] else ""
                    d['mtu_error'] = _err if d['mtu_error'] else ""
                    d['status_error'] = _err if d['status_error'] else ""
                    d['subnet_gateway_error'] = \
                        _err if d['subnet_gateway_error'] else ""

        _services(data["nodes"])
        # vcp status
        # query virsh and prepare for report
        _vcp_status(data["nodes"])

        # soft net stats
        _soft_net_stats(data["nodes"])


class ReportToFile(object):
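    """Write a rendered report to `target`.

    `report` is any of the renderer callables above; `target` is either
    a file name or an already open file-like object.

    A minimal usage sketch (report type and file name are hypothetical):

        ReportToFile(HTMLNetworkReport(), "report.html")(payload)
    """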

    def __init__(self, report, target):
        self.report = report
        self.target = target

    def __call__(self, payload):
        payload = self.report(payload)

        if isinstance(self.target, six.string_types):
            self._wrapped_dump(payload)
        else:
            self._dump(payload, self.target)

    def _wrapped_dump(self, payload):
        with open(self.target, 'wt') as target:
            self._dump(payload, target)

    @staticmethod
    def _dump(payload, target):
        target.write(payload)