mcp-agent mode for mcp-checker with web-info and REST API
New:
- agent index page served on 0.0.0.0:8765
- REST API with a modular approach to module actions (usage sketch below)
- 'fio' module runs in its own thread and reports real-time status info
- scheduled run option for the 'fio' module
- multiple testrun results are preserved while the agent is active
- Dockerfile for the agent image
Fixed:
- network report fixed to work on Kubernetes environments
- fixed the function for running commands inside daemonset pods
Related-PROD: PROD-36669
Change-Id: I57e73001247af9187680bfc5744590eef219d93c
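
Usage sketch for the new REST API (a minimal sketch using python-requests;
agent host/port per the defaults above, option values are illustrative):

    import requests

    AGENT = "http://127.0.0.1:8765"

    # agent-wide status and API help
    print(requests.get(AGENT + "/api/").json())

    # 'fio' module healthcheck, options and status
    print(requests.get(AGENT + "/api/fio").json())

    # start a single benchmark run; unknown options are rejected
    _action = {
        "module": "fio",
        "action": "do_singlerun",
        "options": {"readwrite": "read", "runtime": "30s"}
    }
    requests.post(AGENT + "/api/", json=_action)

    # list preserved testrun results
    _action = {"module": "fio", "action": "get_resultlist", "options": {}}
    print(requests.post(AGENT + "/api/", json=_action).json())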
diff --git a/cfg_checker/agent/__init__.py b/cfg_checker/agent/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cfg_checker/agent/__init__.py
diff --git a/cfg_checker/agent/cfg_agent.py b/cfg_checker/agent/cfg_agent.py
new file mode 100644
index 0000000..5619fea
--- /dev/null
+++ b/cfg_checker/agent/cfg_agent.py
@@ -0,0 +1,9 @@
+from .webserver import agent_server
+
+
+def entrypoint():
+ agent_server()
+
+
+if __name__ == '__main__':
+ entrypoint()
diff --git a/cfg_checker/agent/fio_runner.py b/cfg_checker/agent/fio_runner.py
new file mode 100644
index 0000000..f1cecab
--- /dev/null
+++ b/cfg_checker/agent/fio_runner.py
@@ -0,0 +1,601 @@
+import json
+import os
+import queue
+import signal
+import subprocess
+
+
+from copy import deepcopy
+from datetime import datetime, timedelta
+from platform import system, release, node
+from threading import Thread
+import threading
+from time import sleep
+
+
+from cfg_checker.common.exception import CheckerException
+from cfg_checker.common.other import piped_shell
+from cfg_checker.common.log import logger
+
+
+_datetime_fmt = "%m/%d/%Y, %H:%M:%S"
+
+
+def output_reader(_stdout, outq):
+ for line in iter(_stdout.readline, ''):
+ outq.put(line)
+
+
+def _o(option, param, suffix=""):
+ return "--{}={}{}".format(option, param, suffix)
+
+
+def get_time(timestamp=None):
+ if not timestamp:
+ _t = datetime.now()
+ else:
+ _t = datetime.fromtimestamp(timestamp)
+ return _t.strftime(_datetime_fmt)
+
+
+def _get_seconds(value):
+ # assume that we have symbol at the end
+ _suffix = value[-1]
+ if _suffix == 's':
+ return int(value[:-1])
+ elif _suffix == 'm':
+ return int(value[:-1])*60
+ elif _suffix == 'h':
+ return int(value[:-1])*60*60
+ else:
+ return -1
+
+
+def wait_until(end_datetime):
+ while True:
+ diff = (end_datetime - datetime.now()).total_seconds()
+ # In case end_datetime was in past to begin with
+ if diff < 0:
+ return
+ sleep(diff/2)
+ if diff <= 0.1:
+ return
+
+
+class ShellThread(object):
+ def __init__(self, cmd, queue):
+ self.cmd = cmd
+ self.queue = queue
+ self._p = None
+ self.timeout = 15
+ self.output = []
+
+ def run_shell(self):
+ # Start
+ _cmd = " ".join(self.cmd)
+ logger.debug("... {}".format(_cmd))
+ self._p = subprocess.Popen(
+ _cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ env={"PYTHONUNBUFFERED": "1"},
+ universal_newlines=True,
+ bufsize=1
+ )
+ self._t = threading.Thread(
+ target=output_reader,
+ args=(self._p.stdout, self.queue)
+ )
+ self._t.start()
+ if not self.wait_started():
+ self.kill_shell()
+
+    def is_alive(self):
+        # poll() returns None while the process is still running
+        return self._p.poll() is None
+
+    def wait_started(self):
+        _left = self.timeout
+        while True:
+            if not self.queue.empty():
+                break
+            else:
+                logger.debug("... {} sec left".format(_left))
+                sleep(1)
+                _left -= 1
+                if not _left:
+                    logger.debug(
+                        "...timed out after {} sec".format(self.timeout)
+                    )
+                    return False
+        logger.debug("... got first fio output")
+        return True
+
+    def kill_shell(self):
+        # poll() is None while the process is still running
+        if self._p.poll() is None:
+            self._p.send_signal(signal.SIGINT)
+            self.get_output()
+
+    def get_output(self):
+        # drain the queue into the output list
+        while True:
+            try:
+                line = self.queue.get(block=False)
+                line = line.decode() if isinstance(line, bytes) else line
+                self.output.append(line)
+            except queue.Empty:
+                return self.output
+
+
+class FioProcess(Thread):
+ # init vars for status
+ _fio_options_list = [
+ "--time_based",
+ "--output-format=json+",
+ "--eta=always"
+ ]
+ _fio_options_seq_list = [
+ "--thread"
+ ]
+
+ _fio_options_common = {
+ "name": "agent_run",
+ "filename": "/tmp/testfile",
+ "status-interval": "500ms",
+ "randrepeat": 0,
+ "verify": 0,
+ "direct": 1,
+ "gtod_reduce": 0,
+ "bs": "32k",
+ "iodepth": 16,
+ "size": "10G",
+ "readwrite": "randrw",
+ "ramp_time": "5s",
+ "runtime": "30s",
+ "ioengine": "libaio"
+ }
+
+ _fio_options_seq = {
+ "numjobs": 1,
+ "offset_increment": "500M"
+ }
+ _fio_options_mix = {
+ "rwmixread": 50
+ }
+
+ eta_sec = 0
+ total_time_sec = 0
+ elapsed_sec = 0
+ testrun = {}
+
+ mount_point = "/tmp"
+ filename = "testfile"
+
+ # test modes: 'randread', 'randwrite', 'read', 'write', 'randrw'
+ mode = "randrw"
+ _seq_modes = ['read', 'write']
+ _mix_modes = ['randrw']
+ _rand_modes = ['randread', 'randwrite']
+
+ # results
+ results = {}
+
+ def _shell(self, cmd):
+ self._code, self._shell_output = piped_shell(cmd, code=True)
+ if self._code:
+ logger.error(
+ "# Shell error for '{}': [{}] {}".format(
+ cmd,
+ self._code,
+ self._shell_output
+ )
+ )
+ return False
+ else:
+ return True
+
+ def recalc_times(self):
+ _rt = _get_seconds(self._fio_options_common["runtime"])
+ _rup = _get_seconds(self._fio_options_common["ramp_time"])
+        # _get_seconds returns -1 for an unknown suffix
+        if _rt <= 0:
+            raise CheckerException("invalid 'runtime': '{}'".format(_rt))
+        elif _rup <= 0:
+            raise CheckerException("invalid 'ramp_time': '{}'".format(_rup))
+
+ self.total_time_sec = _rt + _rup
+ self.eta_sec = self.total_time_sec
+
+ def __init__(self):
+ Thread.__init__(self)
+ logger.info("fio thread initialized")
+ # save system
+ self.system = system()
+ self.release = release()
+ self.hostname = node()
+ # create a clear var for last shell output
+ self._shell_output = ""
+ # prepare params
+ self.recalc_times()
+ # prepare the fio
+ self.fio_version = "unknown"
+ if not self._shell("fio --version"):
+ raise CheckerException(
+ "Error running fio: '{}'".format(self._shell_output)
+ )
+ else:
+ self.fio_version = self._shell_output
+        # all outputs timeline
+        self.timeline = {}
+        # per-instance results (do not share the class-level dict)
+        self.results = {}
+
+ self._fio_options_common["filename"] = os.path.join(
+ self.mount_point,
+ self.filename
+ )
+
+ if self.system == "Darwin":
+ self._fio_options_common["ioengine"] = "posixaio"
+ # Thread finish marker
+ self.finished = False
+ self.scheduled_datetime = None
+
+ def update_options(self, _dict):
+ # validate keys, do not validate numbers themselves
+ for k, v in _dict.items():
+ if k in self._fio_options_mix:
+ self._fio_options_mix[k] = v
+ elif k in self._fio_options_seq:
+ self._fio_options_seq[k] = v
+ elif k in self._fio_options_common:
+ self._fio_options_common[k] = v
+            else:
+                raise CheckerException(
+                    "Unknown fio option: '{}'".format(k)
+                )
+ # recalc
+ self.recalc_times()
+
+ def run(self):
+ def _cut(_list, _s, _e):
+ _new = _list[_s:_e]
+ _pre = _list[:_s]
+ _list = _pre + _list[_e:]
+ return (_new, _list)
+
+ # create a cmd
+ _cmd = ["fio"]
+ _cmd += self._fio_options_list
+ _cmd += [_o(k, v) for k, v in self._fio_options_common.items()]
+
+ if self._fio_options_common["readwrite"] in self._seq_modes:
+ _sq = self._fio_options_seq_list
+ _cmd += _sq + [_o(k, v) for k, v in self._fio_options_seq.items()]
+ elif self._fio_options_common["readwrite"] in self._mix_modes:
+ _cmd += [_o(k, v) for k, v in self._fio_options_mix.items()]
+
+ _q = queue.Queue()
+ self.fiorun = ShellThread(_cmd, _q)
+ # Check if schedule is set
+ if self.scheduled_datetime:
+ logger.debug(
+ "waiting for '{}', now is '{}', total of {} sec left".format(
+ self.scheduled_datetime.strftime(_datetime_fmt),
+ datetime.now().strftime(_datetime_fmt),
+ (self.scheduled_datetime - datetime.now()).total_seconds()
+ )
+ )
+ wait_until(self.scheduled_datetime)
+ self.fiorun.run_shell()
+ _raw = []
+ _start = -1
+ _end = -1
+ while self.fiorun.is_alive() or not _q.empty():
+ while not _q.empty():
+ # processing
+ _bb = _q.get(block=False)
+ if isinstance(_bb, bytes):
+ _line = _bb.decode('utf-8')
+ else:
+ _line = _bb
+                if _start < 0 and _end < 0 and not _line.startswith("{"):
+                    _time = get_time()
+                    self.results[_time] = {
+                        "error": _line
+                    }
+                    self.eta_sec = -1
+                    self.fiorun.kill_shell()
+                    return
+ _current = _line.splitlines()
+ _raw += _current
+ for ll in range(len(_raw)):
+ if _start < 0 and _raw[ll] == "{":
+ _start = ll
+ elif _end < 0 and _raw[ll] == "}":
+ _end = ll
+ # loop until we have full json
+ if _end < 0 or _start < 0:
+ continue
+                    # if start and end found, cut the json out
+ (_json, _raw) = _cut(_raw, _start, _end+1)
+ _start = -1
+ _end = -1
+ # Try to parse json
+ _json = "\n".join(_json)
+ try:
+ _json = json.loads(_json)
+ _timestamp = _json["timestamp"]
+ self.timeline[_timestamp] = _json["jobs"][0]
+
+ # save last values
+ self.eta_sec = self.timeline[_timestamp]["eta"]
+ self.elapsed_sec = self.timeline[_timestamp]["elapsed"]
+ self.testrun = _json
+ except TypeError as e:
+ logger.error("ERROR: {}".format(e))
+ except json.decoder.JSONDecodeError as e:
+ logger.error("ERROR: {}".format(e))
+ if not self.eta_sec:
+ break
+ sleep(0.1)
+ # Save status to results dictionary
+ self.results[get_time(timestamp=self.testrun["timestamp"])] = {
+ "result": self.testrun,
+ "timeline": self.timeline
+ }
+ self.finished = True
+ return
+
+ def healthcheck(self):
+ _version = self.fio_version
+ _binary_path = self._shell_output if self._shell("which fio") else ""
+ if self._shell("fio --enghelp"):
+ _ioengines = self._shell_output
+ _ioengines = _ioengines.replace("\t", "")
+ _ioengines = _ioengines.splitlines()[1:]
+ else:
+ _ioengines = []
+
+ return {
+ "ready": all((_version, _binary_path, _ioengines)),
+ "version": _version,
+ "path": _binary_path,
+ "ioengines": _ioengines,
+ "system": self.system,
+ "release": self.release,
+ "hostname": self.hostname
+ }
+
+ def status(self):
+ _running = self.is_alive() and self.eta_sec >= 0
+ _scheduled = False
+ _diff = -1
+ if self.scheduled_datetime:
+ _diff = (self.scheduled_datetime - datetime.now()).total_seconds()
+ if _diff > 0:
+ _scheduled = True
+ _s = "running" if _running else "idle"
+ _s = "scheduled" if _scheduled else _s
+ _s = "finished" if self.finished else _s
+ return {
+ "status": _s,
+ "progress": self.get_progress()
+ }
+
+ def end_fio(self):
+ if self.fiorun:
+ self.fiorun.kill_shell()
+
+ # Current run
+    def percent_done(self):
+        _total = self.elapsed_sec + self.eta_sec
+        # guard against zero division when eta is reset on error
+        if _total <= 0:
+            return 0.0
+        return float(self.elapsed_sec) / float(_total) * 100.0
+
+ def get_progress(self):
+ return "{:.2f}".format(self.percent_done())
+
+ # latest parsed measurements
+ def get_last_measurements(self):
+ if self.timeline:
+ return self.timeline[max(list(self.timeline.keys()))]
+ else:
+ return {}
+
+
+class FioProcessShellRun(object):
+ stats = {}
+ results = {}
+
+ def __init__(self, init_class=FioProcess):
+ self.init_class = init_class
+ self.actions = {
+ "do_singlerun": self.do_singlerun,
+ "do_scheduledrun": self.do_scheduledrun,
+ "get_options": self.get_options,
+ "get_result": self.get_result,
+ "get_resultlist": self.get_resultlist
+ }
+ self.fio_reset()
+
+ @staticmethod
+ def healthcheck(fio):
+ hchk = fio.healthcheck()
+ hchk_str = \
+ "# fio status: {}\n# {} at {}\n# Engines: {}".format(
+ "ready" if hchk["ready"] else "fail",
+ hchk["version"],
+ hchk["path"],
+ ", ".join(hchk["ioengines"])
+ )
+ return hchk, hchk_str
+
+ def status(self):
+ return self.fio.status()
+
+ def fio_reset(self):
+ # Fancy way of handling fio class not even initialized yet
+ try:
+ _f = self.fio.finished
+ _r = self.fio.results
+            _o = self.get_options()
+ except AttributeError:
+ _f = True
+ _r = None
+ _o = None
+ # Check if reset is needed
+ if not _f:
+ # No need to reset, fio is either idle or running
+ return
+ else:
+ # extract results if they present
+ if _r:
+ self.results.update(_r)
+ # re-init
+ _fio = self.init_class()
+ # Do healthcheck
+ self.hchk, self.hchk_str = self.healthcheck(_fio)
+ # restore options if they existed
+ if _o:
+ _fio.update_options(_o)
+ self.fio = _fio
+
+ def get_options(self):
+ _opts = deepcopy(self.fio._fio_options_common)
+ _opts.update(deepcopy(self.fio._fio_options_seq))
+ _opts.update(deepcopy(self.fio._fio_options_mix))
+ return _opts
+
+ def do_singlerun(self, options):
+ # Reset thread if it closed
+ self.fio_reset()
+ # Fill options
+ self.fio.update_options(options)
+ # Start it
+ self.fio.start()
+ return True
+
+ def do_scheduledrun(self, options):
+ # Reset thread if it closed
+ self.fio_reset()
+ # Handle scheduled time
+ if "scheduled_to" not in options:
+ # required parameter not set
+ return False
+ else:
+ # set time and get rid of it from options
+ self.fio.scheduled_datetime = options.pop("scheduled_to")
+ # Fill options
+ self.fio.update_options(options)
+ # Start it
+ self.fio.start()
+ return True
+
+ def _get_result_object(self, obj_name, time):
+ if time in self.results:
+ if obj_name in self.results[time]:
+ return self.results[time][obj_name]
+ elif "error" in self.results[time]:
+ return self.results[time]["error"]
+ else:
+ return {
+ "error": "Empty {} for '{}'".format(obj_name, time)
+ }
+ else:
+ return {
+ "error": "Result not found for '{}'".format(time)
+ }
+
+ def _update_results(self):
+ # Update only in case of completed thread
+ if self.fio.finished:
+ _r_local = list(self.results.keys())
+ _r_fio = list(self.fio.results.keys())
+ for _r in _r_fio:
+ if _r not in _r_local:
+ self.results[_r] = self.fio.results.pop(_r)
+
+ def get_result(self, time):
+ self._update_results()
+ return self._get_result_object('result', time)
+
+ def get_result_timeline(self, time):
+ self._update_results()
+ return self._get_result_object('timeline', time)
+
+ # reporting
+ def get_resultlist(self):
+ self._update_results()
+ return list(self.results.keys())
+
+ def __call__(self):
+ if not self.fio.is_alive() and not self.fio.finished:
+ self.fio.start()
+
+ while self.fio.is_alive() and self.fio.eta_sec >= 0:
+ sleep(0.2)
+ self.stats = self.fio.get_last_measurements()
+
+ _r = self.stats.get('read', {})
+ _w = self.stats.get('write', {})
+
+ _r_bw = _r.get('bw_bytes', -1)
+ _r_iops = _r.get('iops', -1)
+ _w_bw = _w.get('bw_bytes', -1)
+ _w_iops = _w.get('iops', -1)
+ _s = self.fio.status()
+ if _s["status"] == "scheduled":
+ _t = self.fio.scheduled_datetime
+ _n = datetime.now()
+ _delta = (_t - _n).total_seconds()
+ print(
+ "waiting for '{}'; now '{}'; {} sec left".format(
+ _t.strftime(_datetime_fmt),
+ _n.strftime(_datetime_fmt),
+ _delta
+ )
+ )
+ else:
+ stats = "{}: {:>7}% ({}/{}) " \
+ "(BW/IOPS: " \
+ "Read {:>9.2f} MB/{:>9.2f} " \
+ "Write {:>9.2f} MB/{:>9.2f})".format(
+ _s["status"],
+ _s["progress"],
+ self.fio.elapsed_sec,
+ self.fio.eta_sec,
+ _r_bw / 1024 / 1024,
+ _r_iops,
+ _w_bw / 1024 / 1024,
+ _w_iops
+ )
+ print(stats)
+ self.fio.end_fio()
+
+
+if __name__ == '__main__':
+ # Debug shell to test FioProcessShellRun
+ _shell = FioProcessShellRun()
+ _opts = _shell.get_options()
+ _opts["readwrite"] = "read"
+ _opts["ramp_time"] = "1s"
+ _opts["runtime"] = "5s"
+ _shell.do_singlerun(_opts)
+ _shell()
+ _times = _shell.get_resultlist()
+ print("# results:\n{}".format("\n".join(_times)))
+ # print(
+ # "##### Dumping results\n{}".format(
+ # json.dumps(_shell.get_result(_times[0]), indent=2)
+ # )
+ # )
+ _shell.fio_reset()
+ _opts = _shell.get_options()
+ _opts["readwrite"] = "read"
+ _opts["ramp_time"] = "1s"
+ _opts["runtime"] = "10s"
+ _opts["scheduled_to"] = datetime.now() + timedelta(seconds=12)
+ _shell.do_scheduledrun(_opts)
+ _shell()
+ _times = _shell.get_resultlist()
+ print("# results:\n{}".format("\n".join(_times)))
diff --git a/cfg_checker/agent/webserver.py b/cfg_checker/agent/webserver.py
new file mode 100644
index 0000000..8b8466c
--- /dev/null
+++ b/cfg_checker/agent/webserver.py
@@ -0,0 +1,206 @@
+# author: Alex Savatieiev (osavatieiev@mirantis.com)
+from gevent import monkey, pywsgi
+monkey.patch_all()
+import falcon # noqa E402
+import os # noqa E402
+import json # noqa E402
+
+from copy import deepcopy # noqa E402
+from platform import system, release, node # noqa E402
+
+from cfg_checker.common.settings import pkg_dir # noqa E402
+from cfg_checker.helpers.falcon_jinja2 import FalconTemplate # noqa E402
+from .fio_runner import FioProcessShellRun, get_time # noqa E402
+
+template = FalconTemplate(
+ path=os.path.join(pkg_dir, "templates")
+)
+
+_module_status = {
+ "status": "unknown",
+ "healthcheck": {},
+ "actions": [],
+ "options": {},
+ "uri": "<agent_uri>/api/<modile_name>",
+}
+
+_action = {
+ "module": "<name>",
+ "action": "<action>",
+ "options": "<options_dict>"
+}
+
+_modules = {
+ "fio": deepcopy(_module_status)
+}
+
+_status = {
+ "agent": {
+ "started": get_time()
+ },
+ "modules": list(_modules.keys()),
+ "help": {
+ ".../api": {
+ "GET": "<this_status>",
+ "POST": json.dumps(_action)
+ },
+ ".../api/<module_name>": {
+ "GET": "returns healthcheck and module help"
+ }
+ }
+}
+
+# Populate modules
+_fio = FioProcessShellRun()
+
+
+def _init_status(mod):
+ _modules[mod]["uri"] = "<agent_uri>/api/fio"
+ _modules[mod]["actions"] = list(_fio.actions.keys())
+
+
+def _update_status(mod):
+ _modules[mod]["healthcheck"] = _fio.hchk
+ _modules[mod]["options"] = _fio.get_options()
+ _modules[mod].update(_fio.status())
+
+
+class FioStatus:
+ _name = "fio"
+
+ def on_get(self, req, resp):
+ # Hacky way to handle empty request
+ _m = req.get_media(default_when_empty={})
+ if "fast" in _m and _m["fast"]:
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps(_fio.status())
+ else:
+ _update_status(self._name)
+
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps(_modules[self._name])
+
+
+class Api:
+ def on_get(self, req, resp):
+ # return api status
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps(_status)
+
+ def on_post(self, req, resp):
+ def _resp(resp, code, msg):
+ resp.status = code
+ resp.content_type = "application/json"
+ resp.text = json.dumps({"error": msg})
+ # Handle actions
+ _m = req.get_media(default_when_empty={})
+ if _m:
+ # Validate action structure
+ _module = _m.get('module', "")
+ _action = _m.get('action', "")
+ _options = _m.get('options', {})
+
+ if not _module or _module not in list(_modules.keys()):
+ _resp(
+ resp,
+ falcon.HTTP_400,
+ "Invalid module '{}'".format(_module)
+ )
+ return
+ elif not _action or _action not in _modules[_module]['actions']:
+ _resp(
+ resp,
+ falcon.HTTP_400,
+ "Invalid action '{}'".format(_action)
+ )
+ return
+ else:
+ # Handle command
+ _a = _fio.actions[_action]
+ if _action == 'get_options':
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps({"options": _a()})
+ elif _action == 'get_resultlist':
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps({"resultlist": _a()})
+ elif _action == 'get_result':
+ if 'time' not in _options:
+ _resp(
+ resp,
+ falcon.HTTP_400,
+ "No 'time' found for '{}'".format(_action)
+ )
+ return
+ _time = _options['time']
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps({_time: _a(_time)})
+ elif _action == 'do_singlerun':
+ _a(_options)
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps({"ok": True})
+ elif _action == 'do_scheduledrun':
+                    # schedule the run ('scheduled_to' comes in options)
+ _a(_options)
+ resp.status = falcon.HTTP_200
+ resp.content_type = "application/json"
+ resp.text = json.dumps({"ok": True})
+ else:
+ _resp(
+ resp,
+ falcon.HTTP_500,
+ "Unknown error happened for '{}/{}/{}'".format(
+ _module,
+ _action,
+ _options
+ )
+ )
+ return
+ else:
+            _resp(resp, falcon.HTTP_400, "Empty request body")
+
+
+class Index:
+ @template.render("agent_index_html.j2")
+ def on_get(self, request, response):
+ # prepare template context
+ _context = {
+ "gen_date": get_time(),
+ "system": system(),
+ "release": release(),
+ "hostname": node()
+ }
+ _context.update(_status)
+ # creating response
+ response.status = falcon.HTTP_200
+ response.content_type = "text/html"
+ response.context = _context
+
+
+def agent_server(host="0.0.0.0", port=8765):
+ # init api
+ api = falcon.API()
+ # populate pages
+ api.add_route("/", Index())
+ api.add_route("/api/", Api())
+
+ # Populate modules list
+ _active_modules = [FioStatus]
+ # init modules
+ for mod in _active_modules:
+ _init_status(mod._name)
+ _update_status(mod._name)
+
+ api.add_route("/api/"+mod._name, mod())
+
+ # init and start server
+ server = pywsgi.WSGIServer((host, port), api)
+ server.serve_forever()
diff --git a/cfg_checker/common/kube_utils.py b/cfg_checker/common/kube_utils.py
index e1aafbb..95bb19c 100644
--- a/cfg_checker/common/kube_utils.py
+++ b/cfg_checker/common/kube_utils.py
@@ -351,6 +351,7 @@
namespace,
strict=False,
_request_timeout=120,
+ arguments=None,
**kwargs
):
_pname = ""
@@ -370,16 +371,18 @@
", ".join(_pnames)
)
)
- _pname = _pnames[0]
elif len(_pnames) < 1:
raise KubeException("No pods found for '{}'".format(pod_name))
+ # in case of >1 and =1 we are taking 1st anyway
+ _pname = _pnames[0]
else:
_pname = pod_name
logger_cli.debug(
- "... cmd: [CoreV1] exec {} -n {} -- {}".format(
+ "... cmd: [CoreV1] exec {} -n {} -- {} '{}'".format(
_pname,
namespace,
- cmd
+ cmd,
+ arguments
)
)
# Set preload_content to False to preserve JSON
@@ -387,6 +390,8 @@
# Which causes to change " to '
# After that json.loads(...) fail
cmd = cmd if isinstance(cmd, list) else cmd.split()
+ if arguments:
+ cmd += [arguments]
_pod_stream = stream(
self.CoreV1.connect_get_namespaced_pod_exec,
_pname,
@@ -404,6 +409,17 @@
_pod_stream.run_forever(timeout=_request_timeout)
# read the output
_output = _pod_stream.read_stdout()
+ _error = _pod_stream.read_stderr()
+ if _error:
+ # copy error to output
+ logger_cli.warning(
+ "WARNING: cmd of '{}' returned error:\n{}\n".format(
+ " ".join(cmd),
+ _error
+ )
+ )
+ if not _output:
+ _output = _error
# Force recreate of api objects
self._coreV1 = None
# Send output
diff --git a/cfg_checker/common/other.py b/cfg_checker/common/other.py
index 3d0cc13..b5a0406 100644
--- a/cfg_checker/common/other.py
+++ b/cfg_checker/common/other.py
@@ -15,12 +15,15 @@
# 'Dirty' and simple way to execute piped cmds
-def piped_shell(command):
+def piped_shell(command, code=False):
logger_cli.debug("... cmd:'{}'".format(command))
_code, _out = subprocess.getstatusoutput(command)
if _code:
logger_cli.error("Non-zero return code: {}, '{}'".format(_code, _out))
- return _out
+ if code:
+ return _code, _out
+ else:
+ return _out
# 'Proper way to execute shell
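
Note: with the new 'code' flag callers can branch on the exit status;
a small usage sketch:

    from cfg_checker.common.other import piped_shell

    # legacy call shape: output only
    _out = piped_shell("uname -r")
    # new call shape: (exit_code, output), as the fio agent uses it
    _code, _out = piped_shell("fio --version", code=True)
    if _code:
        print("fio not available: {}".format(_out))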
diff --git a/cfg_checker/common/ssh_utils.py b/cfg_checker/common/ssh_utils.py
index bdfe6b5..d500e36 100644
--- a/cfg_checker/common/ssh_utils.py
+++ b/cfg_checker/common/ssh_utils.py
@@ -240,6 +240,7 @@
return _out
def wait_ready(self, cmd, timeout=60):
+ # Wait for command to finish inside SSH
def _strip_cmd_carrets(_str, carret='\r', skip_chars=1):
_cnt = _str.count(carret)
while _cnt > 0:
diff --git a/cfg_checker/helpers/falcon_jinja2/__init__.py b/cfg_checker/helpers/falcon_jinja2/__init__.py
new file mode 100644
index 0000000..3615d21
--- /dev/null
+++ b/cfg_checker/helpers/falcon_jinja2/__init__.py
@@ -0,0 +1,6 @@
+# https://github.com/myusko/falcon-jinja
+from .template import * # noqa
+
+__version__ = '0.0.3'
+__author__ = 'Michael Yusko'
+__license__ = 'MIT'
diff --git a/cfg_checker/helpers/falcon_jinja2/template.py b/cfg_checker/helpers/falcon_jinja2/template.py
new file mode 100644
index 0000000..cb858ff
--- /dev/null
+++ b/cfg_checker/helpers/falcon_jinja2/template.py
@@ -0,0 +1,85 @@
+# https://github.com/myusko/falcon-jinja
+import falcon
+from falcon.response import Response
+from jinja2 import Environment, FileSystemLoader
+from jinja2.exceptions import TemplateNotFound
+
+__all__ = ['FalconTemplate']
+
+
+class FalconTemplateNotFound(Exception):
+ pass
+
+
+class FalconTemplate:
+ """
+ Args:
+        path (str): Name of the directory where HTML files are defined.
+    Attributes:
+        _env (jinja2.Environment): Jinja component which shares
+            variables like configuration etc.
+        template_path (str): Name of the folder where all
+            HTML files are defined.
+        loader (jinja2.FileSystemLoader): Jinja2 class which loads
+            HTML templates from the filesystem.
+ """
+
+ BASE_FOLDER = 'templates'
+
+ def __init__(self, path: str = None):
+ self.template_path = path or self.BASE_FOLDER
+ self.loader = FileSystemLoader(self.template_path)
+ self._env = Environment(loader=self.loader)
+
+ @staticmethod
+ def __get_response(objects: tuple):
+ """Retrieve falcon's Response object
+ Args:
+ objects (tuple): An list with falcon.Request,
+ falcon.Response, and other arguments.
+ Returns:
+ An falcon.Response object if it there
+ otherwise False.
+ """
+ for response in objects:
+ if isinstance(response, Response):
+ return response
+ return False
+
+ def _make_template(self, template: str, context: dict):
+ """Makes a jinja template, and rendered passed context
+ Args:
+ template (str): Name of HTML file which will be rendered.
+ context (dict): An dictionary with
+ a response context.
+ Returns:
+ A string representation of HTML content
+ """
+ try:
+ template = self._env.get_template(template)
+ except TemplateNotFound:
+ raise FalconTemplateNotFound(
+ 'Template {} not found '
+ 'in {} folder'.format(template, self.template_path)
+ )
+ return template.render(**context)
+
+ def render(self, template: str):
+ """Decorator which renders HTML content
+ Args:
+ template (str): HTML file for which will
+ be rendered HTML content
+ """
+ def render_template(func):
+ def wrapper(*args, **kwargs):
+
+ response = self.__get_response(args)
+ func(*args, **kwargs)
+
+ response.content_type = falcon.MEDIA_HTML
+ response.status = falcon.HTTP_200
+ response.body = self._make_template(
+ template, response.context
+ )
+ return wrapper
+ return render_template
diff --git a/cfg_checker/modules/ceph/__init__.py b/cfg_checker/modules/ceph/__init__.py
index 74fdde3..495e9b1 100644
--- a/cfg_checker/modules/ceph/__init__.py
+++ b/cfg_checker/modules/ceph/__init__.py
@@ -86,10 +86,6 @@
ceph_info = info.KubeCephInfo(config)
logger_cli.info("# Collecting Ceph cluster information")
- logger_cli.warning(
- "\nWARNING: 'ceph info' has 'Work in progress' status!\n"
- )
-
ceph_info.gather_info()
# Debug, enable if needed to debug report generation
@@ -110,9 +106,6 @@
args_utils.check_supported_env(ENV_TYPE_KUBE, args, config)
_filename = args_utils.get_arg(args, 'html')
logger_cli.info("# Ceph cluster Configuration report")
- logger_cli.warning(
- "\nWARNING: 'ceph info' has 'Work in progress' status!\n"
- )
# _class = _selectClass(_env)
ceph_info = info.KubeCephInfo(config)
diff --git a/cfg_checker/modules/network/__init__.py b/cfg_checker/modules/network/__init__.py
index a99fa9e..736be50 100644
--- a/cfg_checker/modules/network/__init__.py
+++ b/cfg_checker/modules/network/__init__.py
@@ -69,6 +69,14 @@
help="HTML filename to save report"
)
+ net_report_parser.add_argument(
+ '--skip-ifs',
+ metavar='skip_ifs', default="docker",
+ help="String with keywords to skip networks which has interfaces "
+ "names with keywords as substrings. Example: 'eno' keyword will "
+ "cause to skip interface named 'eno1np0'. Example: 'docker'"
+ )
+
net_ping_parser = net_subparsers.add_parser(
'ping',
help="Ping all nodes with each other using network CIDR"
@@ -149,13 +157,22 @@
# Network Report
# Check if there is supported env found
_env = args_utils.check_supported_env(
- [ENV_TYPE_SALT],
+ [ENV_TYPE_SALT, ENV_TYPE_KUBE],
args,
config
)
# Start command
logger_cli.info("# Network report (check, node map)")
+    _skip_ifs_keywords = args.skip_ifs.split(",")
+ logger_cli.info(
+ "-> Interface keywords skip list is '{}'".format(
+ ", ".join(_skip_ifs_keywords)
+ )
+ )
+
_filename = args_utils.get_arg(args, 'html')
_skip, _skip_file = args_utils.get_skip_args(args)
_class = _selectClass(_env)
@@ -164,7 +181,7 @@
skip_list=_skip,
skip_list_file=_skip_file
)
- netChecker.check_networks(map=False)
+ netChecker.check_networks(skip_keywords=_skip_ifs_keywords, map=False)
# save what was collected
netChecker.errors.save_iteration_data()
diff --git a/cfg_checker/modules/network/mapper.py b/cfg_checker/modules/network/mapper.py
index 875f633..dea7d4e 100644
--- a/cfg_checker/modules/network/mapper.py
+++ b/cfg_checker/modules/network/mapper.py
@@ -798,10 +798,11 @@
_daemonset = self.get_daemonset()
logger_cli.info("-> Running script on daemonset")
# exec script on all pods in daemonset
- _result = self.master.execute_script_on_daemon_set(
+ _result = self.master.execute_cmd_on_daemon_set(
_daemonset,
script,
- args=args
+ args=args,
+ is_script=True
)
# delete daemonset
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index ef2219c..1dcec2a 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -509,6 +509,7 @@
# prepare needed resources
self._check_namespace()
self._scripts = self._check_config_map()
+ self.prepared_daemonsets = []
def _check_namespace(self):
# ensure namespace
@@ -984,7 +985,11 @@
# create daemonset
logger_cli.debug("... preparing daemonset")
- return self.kube.prepare_daemonset_from_yaml(self._namespace, _ds)
+ _ds = self.kube.prepare_daemonset_from_yaml(self._namespace, _ds)
+ # Save prepared daemonset
+ self.prepared_daemonsets.append(_ds)
+ # return it
+ return _ds
def wait_for_daemonset(self, ds, timeout=120):
# iteration timeout
@@ -1076,7 +1081,13 @@
)
return _result
- def execute_script_on_daemon_set(self, ds, script_filename, args=None):
+ def execute_cmd_on_daemon_set(
+ self,
+ ds,
+ cmd,
+ args=None,
+ is_script=False
+ ):
"""
-Query daemonset for pods and execute script on all of them
+Query daemonset for pods and execute a command or script on all of them
"""
@@ -1090,6 +1101,7 @@
plist[2], # namespace
strict=True,
_request_timeout=120,
+ arguments=plist[5]
)
]
@@ -1103,16 +1115,24 @@
)
_plist = []
_arguments = args if args else ""
- _cmd = [
- "python3",
- os.path.join(
- "/",
- self.env_config.kube_scripts_folder,
- script_filename
- ),
- _arguments
- ]
- _cmd = " ".join(_cmd)
+ if is_script:
+ _cmd = [
+ "python3",
+ os.path.join(
+ "/",
+ self.env_config.kube_scripts_folder,
+ cmd
+ ),
+ _arguments
+ ]
+ _cmd = " ".join(_cmd)
+ else:
+ # decide if we are to wrap it to bash
+ if '|' in cmd:
+ _cmd = "bash -c"
+ _arguments = cmd
+ else:
+ _cmd = cmd
for item in _pods.items:
_plist.append(
[
@@ -1120,12 +1140,12 @@
item.spec.node_name,
item.metadata.namespace,
item.metadata.name,
- _cmd
+ _cmd,
+ _arguments
]
)
# map func and cmd
- logger_cli
pool = Pool(self.env_config.threads)
_results = {}
self.not_responded = []
@@ -1202,3 +1222,48 @@
# TODO: Exception handling
return _target_path
+
+ def get_cmd_for_nodes(self, cmd, target_key, target_dict=None, nodes=None):
+ """Function runs command on daemonset and parses result into place
+ or into dict structure provided
+
+    :return: no return value, data published internally
+ """
+ logger_cli.debug(
+ "... collecting results for '{}'".format(cmd)
+ )
+ if target_dict:
+ _nodes = target_dict
+ else:
+ _nodes = self.nodes
+ # Dirty way to get daemonset that was used in checker and not deleted
+ _ds = self.prepared_daemonsets[0]
+ _result = self.execute_cmd_on_daemon_set(_ds, cmd)
+ for node, data in _nodes.items():
+
+ if node in self.skip_list:
+ logger_cli.debug(
+ "... '{}' skipped while collecting '{}'".format(
+ node,
+ cmd
+ )
+ )
+ continue
+ # Prepare target key
+ if target_key not in data:
+ data[target_key] = None
+ # Save data
+ if data['status'] in [NODE_DOWN, NODE_SKIP]:
+ data[target_key] = None
+ elif node not in _result:
+ continue
+ elif not _result[node]:
+ logger_cli.debug(
+ "... '{}' not responded after '{}'".format(
+ node,
+ self.env_config.salt_timeout
+ )
+ )
+ data[target_key] = None
+ else:
+ data[target_key] = _result[node]
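
Note: a sketch of how the renamed daemonset helpers are meant to be
called from within the master/nodes class (piped commands are wrapped
into 'bash -c' automatically; the target key is illustrative):

    # run a raw shell command on every pod of the prepared daemonset
    _ds = self.prepared_daemonsets[0]
    _result = self.execute_cmd_on_daemon_set(_ds, "free -h | grep 'Mem'")

    # or let get_cmd_for_nodes file the per-node output under a key
    self.get_cmd_for_nodes("free -h | grep 'Mem'", "ram_raw")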
diff --git a/cfg_checker/reports/reporter.py b/cfg_checker/reports/reporter.py
index 1f54ff3..dc9a2cf 100644
--- a/cfg_checker/reports/reporter.py
+++ b/cfg_checker/reports/reporter.py
@@ -302,18 +302,19 @@
def _extend_data(self, data):
def get_bytes(value):
- _char = value[-1]
+            _size_i = value[-1] == 'i'
+ _char = value[-2] if _size_i else value[-1]
_ord = ord(_char)
if _ord > 47 and _ord < 58:
# bytes comes with no Char
return int(value)
else:
_sizes = ["*", "K", "M", "G", "T"]
- _flo = float(value[:-1])
+ _flo = float(value[:-2]) if _size_i else float(value[:-1])
_pwr = 1
if _char in _sizes:
_pwr = _sizes.index(_char)
- return int(_flo**_pwr)
+ return int(1024**_pwr*_flo)
def _dmidecode(_dict, type=0):
# _key = "dmi"
@@ -379,7 +380,7 @@
_key = "ram"
_key_r = "ram_raw"
_f_cmd = self.master.get_cmd_for_nodes
- _cmd = "free -h | sed -n '/Mem/s/ \\+/ /gp'"
+ _cmd = "free -h | grep 'Mem' | sed -n '/Mem/s/ \\+/ /gp'"
_f_cmd(_cmd, _key_r, target_dict=_dict)
# parse them and put into dict
for node, dt in _dict.items():
diff --git a/etc/docker-cfg-agent b/etc/docker-cfg-agent
new file mode 100644
index 0000000..6c5d583
--- /dev/null
+++ b/etc/docker-cfg-agent
@@ -0,0 +1,28 @@
+FROM ubuntu:20.04
+
+LABEL maintainer="Alex Savatieiev (a.savex@gmail.com)"
+
+WORKDIR /opt
+RUN export TZ="America/Chicago" && \
+ ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
+ apt-get update && \
+ apt-get install -y make gcc g++ git libaio-dev libaio1 zlib1g-dev && \
+ apt-get install -y python3-pip python3-venv vim iperf3 mtr htop iputils-ping traceroute tcpdump wget iproute2 curl screen && \
+ git clone --depth 1 --branch fio-3.26 https://github.com/axboe/fio.git && \
+ cd /opt/fio && \
+ ./configure && \
+ make && \
+ make install && \
+ cd /opt && \
+ rm -rf /opt/fio && \
+ git clone "https://gerrit.mcp.mirantis.com/mcp/cfg-checker" && \
+ cd cfg-checker && \
+ pip3 install --no-cache-dir -r requirements.txt && \
+ python3 setup.py install && \
+ cd /opt && \
+ rm -rf /opt/cfg-checker && \
+ apt --yes remove gcc g++ && \
+ apt autoremove --yes && \
+ rm -rf /var/lib/apt/lists/*
+
+ENTRYPOINT ["checker-agent"]
diff --git a/requirements.txt b/requirements.txt
index 27030e0..26b0d1e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,3 +5,5 @@
ipaddress
configparser
kubernetes
+gevent
+falcon
diff --git a/setup.py b/setup.py
index c22fc61..1a6329f 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,10 @@
'jinja2',
'requests',
'ipaddress',
- 'configparser'
+ 'configparser',
+ 'kubernetes',
+ 'gevent',
+ 'falcon'
]
entry_points = {
@@ -27,21 +30,22 @@
"mcp-checker = cfg_checker.cfg_check:config_check_entrypoint",
"mcp-pkg = cfg_checker.cli.packages:entrypoint",
"mcp-net = cfg_checker.cli.network:entrypoint",
- "cmp-reclass = cfg_checker.cli.reclass:entrypoint"
+ "cmp-reclass = cfg_checker.cli.reclass:entrypoint",
+ "checker-agent = cfg_checker.agent.cfg_agent:entrypoint"
]
}
setup(
name="mcp-checker",
- version="0.41a",
+ version="0.6",
author="Alex Savatieiev",
author_email="osavatieiev@mirantis.com",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.6"
],
- keywords="QA, openstack, salt, config, reclass",
+ keywords="QA, openstack, kubernetes, salt, config, reclass, networking, ceph",
entry_points=entry_points,
url="",
packages=find_packages(),
@@ -53,6 +57,6 @@
install_requires=dependencies,
data_files=DATA,
license="Apache Licence, version 2",
- description="MCP Checker tool. For best results use on MCP deployments",
+ description="MCP/MCC/MOS Checker tool. For use only on Mirantis product deployments",
long_description=README
)
diff --git a/templates/agent_index_html.j2 b/templates/agent_index_html.j2
new file mode 100644
index 0000000..ff8f613
--- /dev/null
+++ b/templates/agent_index_html.j2
@@ -0,0 +1,305 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <meta charset="UTF-8">
+ <title>cfg-checker agent</title>
+ {% include 'common_styles.j2' %}
+ {% include 'common_scripts.j2' %}
+    <script type="text/javascript">
+ const getJSON = async url => {
+ const response = await fetch(url);
+ if(!response.ok) // check if response worked (no 404 errors etc...)
+ throw new Error(response.statusText);
+
+ const data = response.json(); // get JSON from the response
+ return data; // returns a promise, which resolves to this data value
+ }
+
+ function qStatus(uri, tagId) {
+ var dt, el;
+
+ el = document.getElementById(tagId);
+ el.innerHTML = "calling " + uri;
+
+ getJSON(uri).then(data => {
+ el.innerHTML = "<pre>" + JSON.stringify(data, null, ' ') + "</pre>";
+ }).catch(error => {
+ el.innerHTML = JSON.stringify(error, null, ' ');
+ });
+ };
+
+ function updateModules() {
+ var _n, _v, _hc, _s, _phead, _p;
+
+ let mods = [];
+ // Jinja populated list
+ {% for mod in modules %}
+ mods.push("{{ mod }}");
+ {% endfor %}
+
+ mods.forEach((mod) => {
+ // get all ids needed to fill
+ _n = document.getElementById(mod + "_name");
+ _v = document.getElementById(mod + "_version");
+ _hc = document.getElementById(mod + "_healthcheck");
+ _s = document.getElementById(mod + "_status");
+ _phead = document.getElementById(mod + "_progress_head");
+ _p = document.getElementById(mod + "_progress");
+
+ // Fill
+ _n.innerHTML = mod;
+ // get status
+ getJSON("api/" + mod).then(data => {
+ _v.innerHTML = data['healthcheck']['version'];
+ _hc.innerHTML = data['healthcheck']['ready'];
+ _s.innerHTML = data['status'];
+ if ((data['status'] == "idle") || (data['status'] == "finished")) {
+ if (_phead.className.indexOf("idle") < 0) {
+ _phead.className += " idle";
+ }
+ }
+ else if (data['status'] == "running") {
+ _phead.className = _phead.className.replace(" idle", "");
+ }
+ _p.style.width = data['progress'] + "%";
+ }).catch(error => {
+ _v.innerHTML = "?";
+ _hc.innerHTML = "No response";
+ _s.innerHTML = "Unknown";
+ _phead.className = _phead.className.replace(" idle", "");
+ // set bar status to red later
+ //_p.style.width = "100%";
+ });
+ })
+ };
+ var interval = 500;
+ setInterval(updateModules, interval);
+ </script>
+ <style>
+ .barcontent {
+ margin: auto;
+ width: 1350px;
+ padding: 10px;
+ }
+ .bar-centered {
+ float: none;
+ transform: translate(25%);
+ }
+ .agent_labels {
+ display: inline-block;
+ margin: 0px;
+ margin-right: 5px;
+
+ }
+ .info_label {
+ font-family: "LaoSangamMN", Monaco, monospace;
+ display: block;
+ float: left;
+ box-sizing: content-box;
+ background: #262;
+ color: white;
+ border-radius: 5px;
+ text-align: center;
+ padding: 0px 5px 5px 5px;
+ height: 14px;
+ margin: 5px 2px 5px 2px;
+ }
+ .gray_bg { background: #444; }
+ .section_head { font-size: 0.8em; color: Navy; padding-left: 2px; }
+ .row_button {
+ background-color: #468;
+ color: #fff;
+ cursor: pointer;
+ padding: 5px;
+ width: 100%;
+ border: none;
+ text-align: left;
+ outline: none;
+ font-size: 13px;
+ }
+ .row_button:after {
+ content: '\02795'; /* Unicode character for "plus" sign (+) */
+ font-size: 13px;
+ color: white;
+ float: left;
+ margin-left: 5px;
+ }
+
+ .row_active:after {
+ content: "\2796"; /* Unicode character for "minus" sign (-) */
+ color: white
+ }
+
+ .row_active, .row_button:hover {
+ background-color: #68a;
+ color: white
+ }
+
+ .cell_button {
+ color: darkgreen;
+ cursor: pointer;
+ padding: 5px;
+ width: 100%;
+ border: none;
+ text-align: center;
+ outline: none;
+ }
+ .cell_button:hover {
+ background-color: gray;
+ }
+
+ .tooltiptext {
+ transform: translate(100px);
+ }
+
+ .console {
+ background-color: black;
+ font-family: "Lucida Console", Monaco, monospace;
+ font-size: 0.5em;
+ width: auto;
+ color: #fff;
+ border-radius: 6px;
+ padding: 5px 5px;
+ }
+
+ .progress {
+ box-sizing: content-box;
+ height: 10px; /* Can be anything */
+ position: relative;
+ background: #aaa;
+ border-radius: 5px;
+ padding: 5px;
+ box-shadow: inset 0 -1px 1px rgba(255, 255, 255, 0.3);
+ }
+ .progress > span {
+ display: block;
+ height: 100%;
+ border-top-right-radius: 3px;
+ border-bottom-right-radius: 3px;
+ border-top-left-radius: 5px;
+ border-bottom-left-radius: 5px;
+ background-color: rgb(43, 194, 83);
+ background-image: linear-gradient(
+ center bottom,
+ rgb(43, 194, 83) 37%,
+ rgb(84, 240, 84) 69%
+ );
+ position: relative;
+ overflow: hidden;
+ }
+ .progress > span:after,
+ .animate > span > span {
+ content: "";
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ right: 0;
+ background-image: linear-gradient(
+ -45deg,
+ rgba(255, 255, 255, 0.1) 25%,
+ transparent 25%,
+ transparent 50%,
+ rgba(255, 255, 255, 0.1) 50%,
+ rgba(255, 255, 255, 0.1) 75%,
+ transparent 75%,
+ transparent
+ );
+ z-index: 1;
+ background-size: 50px 50px;
+ animation: move 2s linear infinite;
+ border-top-right-radius: 8px;
+ border-bottom-right-radius: 8px;
+ border-top-left-radius: 20px;
+ border-bottom-left-radius: 20px;
+ overflow: hidden;
+ }
+
+ .animate > span:after {
+ display: none;
+ }
+
+ @keyframes move {
+ 0% {
+ background-position: 0 0;
+ }
+ 100% {
+ background-position: 50px 50px;
+ }
+ }
+
+ .bluewish > span {
+ background-image: linear-gradient(#3d52b1, #3052b1);
+ }
+ .idle > span > span,
+ .idle > span::after {
+ background-image: none;
+ }
+ </style>
+</head>
+<body onload="init()">
+
+<div class="header">
+ <div class="label date">generated on: {{ gen_date }}</div>
+</div>
+
+<div class="bar">
+ <div class="bar-centered">
+ <button class="bar-item" onclick="openBar(event, 'status')">Status</button>
+ </div>
+</div>
+
+{% macro status_page(info, id_label) %}
+<div id="{{ id_label }}" class="barcontent">
+ <h5>{{ caller() }}</h5>
+ <hr>
+ <div class="agent_labels">
+ <div class="info_label" onclick="updateModules()">{{ hostname }}</div>
+ <div class="info_label">{{ system }}</div>
+ <div class="info_label">{{ release }}</div>
+ <div class="info_label gray_bg">Started: {{ agent['started'] }}</div>
+ </div>
+ <hr>
+ {% for mod in modules %}
+ <div class="agent_labels">
+ <div class="info_label" id="{{ mod }}_name">{{ mod }}</div>
+ <div class="info_label" id="{{ mod }}_version">unknown</div>
+ <div class="info_label" id="{{ mod }}_healthcheck">unknown</div>
+ <div class="info_label" id="{{ mod }}_status">unknown</div>
+ </div>
+ <div class="progress bluewish idle" id="{{ mod }}_progress_head">
+ <span style="width: 0%" id="{{ mod }}_progress"></span>
+ </div>
+ <hr>
+ {% endfor %}
+ <div class="section_head">REST Api help:</div>
+ {% for uri, ops in help.items() %}
+ <div class="console">
+ <div class="">{{ uri | escape }}</div>
+ {% for op, op_help in ops.items() %}
+ <div class="">{{ op }}: {{ op_help | escape }}</div>
+ {% endfor %}
+ </div>
+ <br>
+ {% endfor %}
+ <div class="section_head">Available modules: {{ modules | join(", ")}}</div>
+ <div class="section_head">Agent:</div>
+ <div class="console" onclick="qStatus('api/','agent')">
+ <div class="" id="agent"></div>
+ </div>
+
+ <div class="section_head">Status json:</div>
+ <div class="console" onclick="qStatus('api/{{ modules[0] }}','fio')">
+ <div class="" id="fio"></div>
+ </div>
+
+
+</div>
+{% endmacro %}
+
+
+{% call status_page(info, "status") %}
+ Agent status
+{% endcall %}
+</body>
+</html>
\ No newline at end of file