Cfg-checker ceph benchmark & info updates and fixes

 - Added collecting Ceph global stats while running benchmark
 - Added collecting osd pg dump data
 - Added page with active OSD nodes stats
 - Added --report-only option; ceph info is still collected

 Fixes:
 - fio-runner uses scheduled time when reporting errors
 - proper ceph pv creation
 - updated retry decorator timeouts for overloaded envs
 - Volume size is now calculated as requested test size * 1.3
 - Proper maximum values indication

 Related-PROD: PROD-36669

Change-Id: Ic518ddbb2ca0915b550e981d0b0fc7084000aa04
diff --git a/cfg_checker/agent/fio_runner.py b/cfg_checker/agent/fio_runner.py
index 29173ed..db012ac 100644
--- a/cfg_checker/agent/fio_runner.py
+++ b/cfg_checker/agent/fio_runner.py
@@ -326,8 +326,7 @@
                 else:
                     _line = _bb
                 if _start < 0 and _end < 0 and not _line.startswith("{"):
-                    _time = get_time()
-                    self.results[_time] = {
+                    self.results[self.testrun_starttime] = {
                         "error": _line
                     }
                     self.eta = -1
diff --git a/cfg_checker/common/decorators.py b/cfg_checker/common/decorators.py
index eed3fba..d83e469 100644
--- a/cfg_checker/common/decorators.py
+++ b/cfg_checker/common/decorators.py
@@ -6,7 +6,7 @@
 from cfg_checker.common import logger, logger_cli
 
 
-def retry(exceptions, total_tries=4, initial_wait=0.5, backoff_factor=2):
+def retry(exceptions, total_tries=5, initial_wait=0.5, backoff_factor=2):
     """
     calling the decorated function applying an exponential backoff.
     Args:
diff --git a/cfg_checker/common/kube_utils.py b/cfg_checker/common/kube_utils.py
index 3e15095..f8c3469 100644
--- a/cfg_checker/common/kube_utils.py
+++ b/cfg_checker/common/kube_utils.py
@@ -374,7 +374,7 @@
 
         return _pods
 
-    @retry(ApiException, initial_wait=5)
+    @retry(ApiException, initial_wait=10)
     def exec_on_target_pod(
         self,
         cmd,
diff --git a/cfg_checker/modules/ceph/__init__.py b/cfg_checker/modules/ceph/__init__.py
index dd483cf..eee01ce 100644
--- a/cfg_checker/modules/ceph/__init__.py
+++ b/cfg_checker/modules/ceph/__init__.py
@@ -109,8 +109,13 @@
         help="Cleanup resources related to benchmark"
     )
     ceph_bench_parser.add_argument(
+        '--report-only',
+        action="store_true", default=False,
+        help="Just create report using files in folder"
+    )
+    ceph_bench_parser.add_argument(
         '--dump-path',
-        metavar="dump_results", default="/tmp",
+        metavar="dump_results",
         help="Dump result after each test run to use them later"
     )
     ceph_bench_parser.add_argument(
@@ -217,6 +222,7 @@
     # Ceph Benchmark using multiple pods
     # if only cleanup needed do it and exit
     _cleanup_only = args_utils.get_arg(args, 'cleanup_only')
+    _report_only = args_utils.get_arg(args, 'report_only')
     config.resource_prefix = "cfgagent"
     if _cleanup_only:
         # Do forced resource cleanup and exit
@@ -232,15 +238,57 @@
         ceph_bench.cleanup()
         return
 
+    # dump results options
+    _dump_path = args_utils.get_arg(args, "dump_path")
+    if _dump_path:
+        logger_cli.info("# Results will be dumped to '{}'".format(_dump_path))
+        config.bench_results_dump_path = _dump_path
+    else:
+        _p = "/tmp"
+        logger_cli.info(
+            "# No result dump path set. Defaulting to {}"
+            "Consider setting it if running long task_file "
+            "based test runs".format(_p)
+        )
+        config.bench_results_dump_path = _p
+
+    # Report filename
+    _filename = args_utils.get_arg(args, 'html')
     # gather Ceph info
     logger_cli.info("# Collecting Ceph cluster information")
     ceph_info = info.KubeCephInfo(config)
 
+    # Task files or options
+    _opts = get_fio_options()
+    # Load name and announce it
+    config.bench_name = args_utils.get_arg(args, "name")
+    _opts["name"] = config.bench_name
+    logger_cli.info(
+        "# Using '{}' as ceph bench jobs name".format(_opts["name"])
+    )
+
+    if _report_only:
+        # Do forced report creation and exit
+        config.bench_mode = "report"
+        config.bench_agent_count = -1
+        ceph_bench = bench.KubeCephBench(config)
+        ceph_bench.set_ceph_info_class(ceph_info)
+        logger_cli.info(
+            "# Preparing to generate report '{}'".format(
+                config.resource_prefix
+            )
+        )
+        # Preload previous results for this name
+        ceph_bench.preload_results()
+        # Gather ceph data
+        ceph_bench.wait_ceph_cooldown()
+        # Generate report
+        ceph_bench.create_report(_filename)
+        return
+
     # Prepare the tasks and do synced testrun or a single one
     logger_cli.info("# Initializing ceph benchmark module")
     args_utils.check_supported_env(ENV_TYPE_KUBE, args, config)
-    # Report filename
-    _filename = args_utils.get_arg(args, 'html')
     # agents count option
     config.bench_agent_count = args_utils.get_arg(args, "agents")
     logger_cli.info("-> using {} agents".format(config.bench_agent_count))
@@ -250,8 +298,6 @@
     _storage_class = args_utils.get_arg(args, "storage_class")
     logger_cli.info("-> using storage class of '{}'".format(_storage_class))
     config.bench_storage_class = _storage_class
-    # dump results options
-    _dump_path = args_utils.get_arg(args, "dump_path")
     if _dump_path:
         logger_cli.info("# Results will be dumped to '{}'".format(_dump_path))
         config.bench_results_dump_path = _dump_path
@@ -261,8 +307,7 @@
             "Consider setting it if running long task_file based test runs"
         )
         config.bench_results_dump_path = _dump_path
-    # Task files or options
-    _opts = get_fio_options()
+
     _task_file = args_utils.get_arg(args, "task_file", nofail=True)
     if not _task_file:
         logger_cli.info("-> Running single benchmark run")
@@ -289,18 +334,11 @@
         logger_cli.info("-> running with tasks from '{}'".format(_task_file))
         config.bench_task_file = _task_file
         config.bench_mode = "tasks"
-    config.bench_name = args_utils.get_arg(args, "name")
-    _opts["name"] = config.bench_name
-    logger_cli.info(
-        "# Using '{}' as ceph bench jobs name".format(_opts["name"])
-    )
     logger_cli.debug("... default/selected options for fio:")
     for _k in _opts.keys():
         # TODO: Update options for single run
         logger_cli.debug("    {} = {}".format(_k, _opts[_k]))
 
-    # handle option inavailability from command line for single mode
-
     # init the Bench class
     ceph_bench = bench.KubeCephBench(config)
     ceph_bench.set_ceph_info_class(ceph_info)
diff --git a/cfg_checker/modules/ceph/bench.py b/cfg_checker/modules/ceph/bench.py
index 2eedcfb..0780596 100644
--- a/cfg_checker/modules/ceph/bench.py
+++ b/cfg_checker/modules/ceph/bench.py
@@ -41,6 +41,19 @@
     return {}
 
 
+def _split_vol_size(size):
+    # I know, but it is faster than regex
+    _numbers = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57]
+    _s_int = "0"
+    _s_type = ""
+    for ch in size:
+        if ord(ch) in _numbers:
+            _s_int += ch
+        else:
+            _s_type += ch
+    return int(_s_int), _s_type
+
+
 class CephBench(object):
     _agent_template = "cfgagent-template.yaml"
 
@@ -69,26 +82,32 @@
 
         self.mode = config.bench_mode
         self.resource_prefix = config.resource_prefix
+
         if config.bench_mode == "tasks":
             self.taskfile = config.bench_task_file
             self.load_tasks(self.taskfile)
-        elif config.bench_mode == "cleanup":
+
+        if config.bench_mode == "cleanup":
             self.cleanup_list = []
             return
 
-        self.storage_class = config.bench_storage_class
-        self.results_dump_path = config.bench_results_dump_path
         self.bench_name = config.bench_name
+        self.results_dump_path = config.bench_results_dump_path
+        self.results = {}
+        self.agent_results = {}
+        self.cleanup_list = []
         self.agent_pods = []
+
+        if config.bench_mode == "report":
+            self.results = {}
+            return
+
+        self.storage_class = config.bench_storage_class
         self.services = []
         # By default,
         # 30 seconds should be enough to send tasks to 3-5 agents
         self.scheduled_delay = 30
 
-        self.cleanup_list = []
-        self.results = {}
-        self.agent_results = {}
-
     def set_ceph_info_class(self, ceph_info):
         self.ceph_info = ceph_info
 
@@ -138,6 +157,19 @@
 
     def prepare_agents(self, options):
         logger_cli.info("# Preparing {} agents".format(self.agent_count))
+        # Increase volume size a bit, so datafile fits
+        _quanitizer = 1.3
+        _v_size, _vol_size_units = _split_vol_size(options['size'])
+        _v_size = round(_v_size * _quanitizer)
+        _vol_size = str(_v_size) + _vol_size_units + "i"
+        logger_cli.info(
+            "-> Testfile size: {0}, Volume size: {1} ({0}*{2})".format(
+                options['size'],
+                _vol_size,
+                _quanitizer
+            )
+        )
+        # Start preparing
         for idx in range(self.agent_count):
             # create pvc/pv and pod
             logger_cli.info("-> creating agent '{:02}'".format(idx))
@@ -146,7 +178,7 @@
                 idx,
                 os.path.split(options["filename"])[0],
                 self.storage_class,
-                options['size'] + 'i',
+                _vol_size,
                 self._agent_template
             )
             # save it to lists
@@ -314,6 +346,8 @@
         _ramptime = _get_seconds(options["ramp_time"])
         # Sum up all timings that we must wait and double it
         _timeout = (self.scheduled_delay + _runtime + _ramptime) * 2
+        # We should have no more than 65 measurements
+        _stats_delay = round((_runtime + _ramptime) / 65)
         _start = self.next_scheduled_time
         _end = datetime.now(timezone.utc) + timedelta(seconds=_timeout)
         while True:
@@ -331,9 +365,10 @@
                 )
             # Get Ceph status if _start time passed
             _elapsed = (datetime.now(timezone.utc) - _start).total_seconds()
-            if _elapsed > 0:
+            if _elapsed > _stats_delay:
                 logger_cli.info("-> {:.2f}s elapsed".format(_elapsed))
-                self.results[options["scheduled_to"]]["ceph"][_elapsed] = \
+                _sec = "{:0.1f}".format(_elapsed)
+                self.results[options["scheduled_to"]]["ceph"][_sec] = \
                     self.ceph_info.get_cluster_status()
             # Check if agents finished
             finished = [True for _s in _sts.values()
@@ -353,6 +388,8 @@
                 return False
 
     def _do_testrun(self, options):
+        self.results[options["scheduled_to"]]["osd_df_before"] = \
+            self.ceph_info.get_ceph_osd_df()
         # send single to agent
         if not self._send_scheduled_task(options):
             return False
@@ -361,11 +398,15 @@
             return False
         else:
             logger_cli.info("-> Finished testrun. Collecting results...")
+            # get ceph osd stats
+            self.results[options["scheduled_to"]]["osd_df_after"] = \
+                self.ceph_info.get_ceph_osd_df()
             # Get results for each agent
             self.collect_results()
             logger_cli.info("-> Calculating totals and averages")
             self.calculate_totals()
             self.calculate_ceph_stats()
+            self.osd_df_compare(options["scheduled_to"])
             logger_cli.info("-> Dumping results")
             for _time, _d in self.results.items():
                 self.dump_result(
@@ -395,8 +436,6 @@
         # TODO: Ceph status check
         # self._wait_ceph_cooldown()
 
-        _get_df = self.ceph_info.get_ceph_osd_df
-
         # Do benchmark according to mode
         if self.mode == "tasks":
             logger_cli.info(
@@ -408,7 +447,6 @@
             _total_tasks = len(self.tasks)
             for idx in range(_total_tasks):
                 # init time to schedule
-                _osd_df_before = _get_df()
                 _task = self.tasks[idx]
                 logger_cli.info(
                     "-> Starting next task ({}/{})".format(idx+1, _total_tasks)
@@ -427,18 +465,15 @@
                 self.results[_sch_time] = {
                     "input_options": options,
                     "agents": {},
-                    "ceph": {},
-                    "osd_df_before": _osd_df_before
+                    "ceph": {}
                 }
+                # exit on error
                 if not self._do_testrun(options):
                     return False
-                else:
-                    self.results[_sch_time]["osd_df_after"] = _get_df()
-
+                # Save ceph osd stats and wait cooldown
                 self.wait_ceph_cooldown()
         elif self.mode == "single":
             logger_cli.info("# Running single benchmark")
-            _osd_df_before = _get_df()
             # init time to schedule
             _sch_time = self._get_next_scheduled_time()
             options["scheduled_to"] = _sch_time
@@ -446,13 +481,11 @@
             self.results[_sch_time] = {
                 "input_options": options,
                 "agents": {},
-                "ceph": {},
-                "osd_df_before": _osd_df_before
+                "ceph": {}
             }
             if not self._do_testrun(options):
                 return False
-            else:
-                self.results[_sch_time]["osd_df_after"] = _get_df()
+            # Save ceph osd stats
         else:
             logger_cli.error("ERROR: Unknown mode '{}'".format(self.mode))
             return False
@@ -669,14 +702,14 @@
 
     def calculate_ceph_stats(self):
         # func to get values as lists
-        def _as_list(key, stats):
-            _list = []
-            for _v in stats.values():
-                if key in _v:
-                    _list += [_v[key]]
-                else:
-                    _list += [0]
-            return _list
+        def _get_max_value(key, stats):
+            _max_time = 0
+            _value = 0
+            for _k, _v in stats.items():
+                if key in _v and _value < _v[key]:
+                    _max_time = _k
+                    _value = _v[key]
+            return _max_time, _value
 
         def _perc(n, m):
             if not n:
@@ -684,7 +717,12 @@
             elif not m:
                 return 0
             else:
-                return (n / m) * 100
+                return "{:.0f}%".format((n / m) * 100)
+
+        def _axis_vals(val):
+            return [
+                val, int(val*1.1), int(val*0.75), int(val*0.50), int(val*0.15)
+            ]
 
         _stats = {}
         for _time, data in self.results.items():
@@ -701,28 +739,148 @@
             for _e, _d in data["ceph"].items():
                 _stats[_e] = _d["pgmap"]
             # Maximums
-            m_r_bytes = max(_as_list("read_bytes_sec", _stats))
-            m_w_bytes = max(_as_list("write_bytes_sec", _stats))
-            m_r_iops = max(_as_list("read_op_per_sec", _stats))
-            m_w_iops = max(_as_list("write_op_per_sec", _stats))
+            mrb_t, mrb = _get_max_value("read_bytes_sec", _stats)
+            mwb_t, mwb = _get_max_value("write_bytes_sec", _stats)
+            mri_t, mri = _get_max_value("read_op_per_sec", _stats)
+            mwi_t, mwi = _get_max_value("write_op_per_sec", _stats)
             # Replace ceph with shorter data
             data["ceph"] = {
-                "max_read_bytes_sec": m_r_bytes,
-                "max_write_bytes_sec": m_w_bytes,
-                "max_read_iops_sec": m_r_iops,
-                "max_write_iops_sec": m_w_iops,
+                "max_rbl": _axis_vals(mrb),
+                "max_rbl_time": mrb_t,
+                "max_wbl": _axis_vals(mwb),
+                "max_wbl_time": mwb_t,
+                "max_ril": _axis_vals(mri),
+                "max_ril_time": mri_t,
+                "max_wil": _axis_vals(mwi),
+                "max_wil_time": mwi_t,
                 "stats": _stats
             }
             # Calculate %% values for barchart
             for _e, _d in data["ceph"]["stats"].items():
                 _d["read_bytes_sec_perc"] = \
-                    _perc(_d.get("read_bytes_sec", 0), m_r_bytes)
+                    _perc(_d.get("read_bytes_sec", 0), mrb)
                 _d["write_bytes_sec_perc"] = \
-                    _perc(_d.get("write_bytes_sec", 0), m_w_bytes)
+                    _perc(_d.get("write_bytes_sec", 0), mwb)
                 _d["read_op_per_sec_perc"] = \
-                    _perc(_d.get("read_op_per_sec", 0), m_r_iops)
+                    _perc(_d.get("read_op_per_sec", 0), mri)
                 _d["write_op_per_sec_perc"] = \
-                    _perc(_d.get("write_op_per_sec", 0), m_w_iops)
+                    _perc(_d.get("write_op_per_sec", 0), mwi)
+        return
+
+    def osd_df_compare(self, _time):
+        def _get_osd(osd_id, nodes):
+            for osd in nodes:
+                if osd["id"] == osd_id:
+                    return osd
+            return None
+
+        logger_cli.info("# Comparing OSD stats")
+        _osd = {}
+        if _time not in self.results:
+            logger_cli.warning("WARNING: {} not found in results. Check data")
+            return
+        data = self.results[_time]
+        # Save summary
+        data["osd_summary"] = {}
+        data["osd_summary"]["before"] = data["osd_df_before"]["summary"]
+        data["osd_summary"]["after"] = data["osd_df_after"]["summary"]
+        data["osd_summary"]["active"] = {
+            "status": "",
+            "device_class": "",
+            "pgs": "",
+            "kb_used": 0,
+            "kb_used_data": 0,
+            "kb_used_omap": 0,
+            "kb_used_meta": 0,
+            "utilization": 0,
+            "var_down": 0,
+            "var_up": 0
+        }
+        # Compare OSD counts
+        osds_before = len(data["osd_df_before"]["nodes"])
+        osds_after = len(data["osd_df_after"]["nodes"])
+        if osds_before != osds_after:
+            logger_cli.warning(
+                "WARNING: Before/After bench OSD "
+                "count mismatch for '{}'".format(_time)
+            )
+        # iterate osds from before
+        _pgs = 0
+        _classes = set()
+        _nodes_up = 0
+        for idx in range(osds_before):
+            _osd_b = data["osd_df_before"]["nodes"][idx]
+            # search for the same osd in after
+            _osd_a = _get_osd(_osd_b["id"], data["osd_df_after"]["nodes"])
+            # Save data to the new place
+            _osd[_osd_b["name"]] = {}
+            _osd[_osd_b["name"]]["before"] = _osd_b
+            if not _osd_a:
+                # If this happen, Ceph cluster is actually broken
+                logger_cli.warning(
+                    "WARNING: Wow! {} dissapered".format(_osd_b["name"])
+                )
+                _osd[_osd_b["name"]]["after"] = {}
+            else:
+                _osd[_osd_b["name"]]["after"] = _osd_a
+            _osd[_osd_b["name"]]["percent"] = {}
+            # Calculate summary using "after" data
+            _pgs += _osd_a["pgs"]
+            _classes.update([_osd_a["device_class"]])
+            if _osd_a["status"] == "up":
+                _nodes_up += 1
+            # compare
+            _keys_b = list(_osd_b.keys())
+            _keys_a = list(_osd_a.keys())
+            _nodes_up
+            # To be safe, detect if some keys are different
+            # ...and log it.
+            _diff = set(_keys_b).symmetric_difference(_keys_a)
+            if len(_diff) > 0:
+                # This should never happen, actually
+                logger_cli.warning(
+                    "WARNING: Before/after keys mismatch "
+                    "for OSD node {}: {}".format(idx, ", ".join(_diff))
+                )
+                continue
+            # Compare each key and calculate how it changed
+            for k in _keys_b:
+                if _osd_b[k] != _osd_a[k]:
+                    # Announce change
+                    logger_cli.debug(
+                        "-> {:4}: {}, {} -> {}".format(
+                            idx,
+                            k,
+                            _osd_b[k],
+                            _osd_a[k]
+                        )
+                    )
+                    # calculate percent
+                    _change_perc = (_osd_a[k] / _osd_b[k]) * 100 - 100
+                    _osd[_osd_b["name"]]["percent"][k] = _change_perc
+
+                    # Increase counters
+                    _p = data["osd_summary"]["active"]
+
+                    if k not in _p:
+                        _p[k] = 1
+                    else:
+                        _p[k] += 1
+                    if k == "var":
+                        if _change_perc > 0:
+                            _p["var_up"] += 1
+                        elif _change_perc < 0:
+                            _p["var_down"] += 1
+        # Save sorted data
+        data["osds"] = _osd
+        logger_cli.info("-> Removing redundand osd before/after data")
+        data.pop("osd_df_before")
+        data.pop("osd_df_after")
+        # Save summary
+        data["osd_summary"]["active"]["status"] = "{}".format(_nodes_up)
+        data["osd_summary"]["active"]["device_class"] = \
+            "{}".format(len(list(_classes)))
+        data["osd_summary"]["active"]["pgs"] = _pgs
         return
 
     # Create report
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index b908571..ebf0e00 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -1322,13 +1322,14 @@
                     _mnt["mountPath"] = path
         # replace claim
         for _v in _pod["spec"]["volumes"]:
-            if "placeholder" in _v["name"]:
+            if "cfgagent-pv" in _v["name"]:
                 # _v["name"] = _pv_n
                 _v["persistentVolumeClaim"]["claimName"] = _pvc_n
 
         # init volume resources
         # _pv_object = self.kube.init_pv_resource(_pv_n, sc, size, path)
         # _pv = self.kube.prepare_pv(_pv_object)
+        # NOTE: size passed in already includes ~30% headroom (see bench.prepare_agents)
         _pvc_object = self.kube.init_pvc_resource(_pvc_n, sc, size)
         _pvc = self.kube.prepare_pvc(_pvc_object)
 
diff --git a/cfg_checker/reports/reporter.py b/cfg_checker/reports/reporter.py
index 150ce65..de0c83c 100644
--- a/cfg_checker/reports/reporter.py
+++ b/cfg_checker/reports/reporter.py
@@ -123,15 +123,15 @@
 
 
 def to_gb(bytes_str):
-    _bytes = int(bytes_str)
-    _gb = _bytes / 1024 / 1024 / 1024
-    return "{}".format(round(_gb, 2))
+    return "{}".format(round(int(bytes_str) / 1024 / 1024 / 1024, 2))
 
 
 def to_mb(bytes_str):
-    _bytes = int(bytes_str)
-    _mb = _bytes / 1024 / 1024
-    return "{}".format(round(_mb, 2))
+    return "{}".format(round(int(bytes_str) / 1024 / 1024, 2))
+
+
+def to_kb(bytes_str):
+    return "{}".format(round(int(bytes_str) / 1024, 2))
 
 
 def get_bucket_item_name(id, cmap):
@@ -251,6 +251,7 @@
         self.jinja2_env.filters['pkg_repo_info'] = make_repo_info
         self.jinja2_env.filters['to_gb'] = to_gb
         self.jinja2_env.filters['to_mb'] = to_mb
+        self.jinja2_env.filters['to_kb'] = to_kb
         self.jinja2_env.filters['get_bucket_item_name'] = get_bucket_item_name
         self.jinja2_env.filters['get_rule_steps'] = get_rule_steps
         self.jinja2_env.filters['get_pool_stats'] = get_pool_stats_by_id
diff --git a/setup.py b/setup.py
index 8fe0ffe..d409398 100644
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,7 @@
 
 setup(
     name="mcp-checker",
-    version="0.65",
+    version="0.67",
     author="Alex Savatieiev",
     author_email="osavatieiev@mirantis.com",
     classifiers=[
diff --git a/templates/bar_chart.j2 b/templates/bar_chart.j2
index 52a042e..8ee1cbf 100644
--- a/templates/bar_chart.j2
+++ b/templates/bar_chart.j2
@@ -2,7 +2,7 @@
     .bc-wrap {
         display: table;
         position: relative;
-        margin: 7px 0;
+        margin: 12px 0;
         height: 60px;
       }
       .bc-container {
@@ -27,10 +27,17 @@
         position: relative;
         height: 0;
         transition: height 0.5s 2s;
-        width: 25px;
+        width: 27px;
         margin: auto;
         background-color: #358;
       }
+      .bcheader {
+        position: absolute;
+        text-align: center;
+        width: 100%;
+        bottom: 53px;
+        font-size: 11px;
+      }
       .bcfooter {
         position: absolute;
         text-align: center;
diff --git a/templates/ceph_bench_html.j2 b/templates/ceph_bench_html.j2
index c05a412..229a553 100644
--- a/templates/ceph_bench_html.j2
+++ b/templates/ceph_bench_html.j2
@@ -2,7 +2,7 @@
 <html lang="en">
 <head>
     <meta charset="UTF-8">
-    <title>Ceph cluster info</title>
+    <title>Ceph cluster benchmark</title>
     {% include 'common_styles.j2' %}
     {% include 'common_scripts.j2' %}
     {% include 'bar_chart.j2' %}
@@ -21,7 +21,11 @@
             float: none;
             transform: translate(25%);
         }
-    
+        .inlineheader {
+            background-color: lightgray;
+            padding-left: 40px;
+            margin-bottom: 10px;
+        }
         /* Node rows*/
 		.node {
 			font-family: "LaoSangamMN", Monaco, monospace;
@@ -30,6 +34,9 @@
 			background-color: white;
             align-items: center;
 		}
+        .node:hover, .node:active {
+            background-color: #eda;
+        }
 		.collapsable {
 			font-family: "LaoSangamMN", Monaco, monospace;
 			font-size: 0.8em;
@@ -98,10 +105,10 @@
         div.services > .collapsable.in {
             display: table-row;
         }
-        tr:nth-child(even) {
+        .agents:nth-child(even) {
             background-color: #eee;
         }
-        tr:nth-child(odd) {
+        .agents:nth-child(odd) {
             background-color: #fff;
         }
 	
@@ -125,6 +132,21 @@
             padding-right: 0px;
             margin: 1px;
         }
+        td > .osd_props_group {
+            display: grid;
+            grid-template-columns: 50px 50px 50px 50px;
+            padding-left: 0px;
+            padding-right: 0px;
+            margin: 1px;
+        }
+        td > .osd_stats_group {
+            display: grid;
+            grid-template-columns: 80px 110px 110px 110px 100px 100px 110px 140px;
+            padding-left: 0px;
+            padding-right: 0px;
+            margin: 1px;
+
+        }
         td > .pg_group {
             display: grid;
             grid-template-columns: 50px 40px 60px 65px 60px 65px 65px;;
@@ -335,6 +357,33 @@
     
     </style>
 </head>
+
+{% macro put_osd_prop(prop, b, a, p) %}
+    {% if prop in p %}
+    <div class="item bench">{{ a[prop] | to_mb }} ({{ "%0.4f" | format(p[prop]|float) }})</div>
+    {% else %}
+    <div class="item bench">{{ a[prop] | to_mb }}</div>
+    {% endif %}
+{% endmacro %}
+
+{% macro put_osd_perc(prop, b, a, p) %}
+    {% if prop in p %}
+    <div class="item bench">{{ "%0.4f"|format(a[prop]|float) }} ({{ "%0.4f" | format(p[prop]|float) }})</div>
+    {% else %}
+    <div class="item bench">{{ "%0.4f"|format(a[prop]|float) }}</div>
+    {% endif %}
+{% endmacro %}
+
+{% macro summary_value(prop, s) %}
+    {% if prop in s %}
+    <div class="item bench">{{ s[prop] }} nodes</div>
+    {% else %}
+    <div class="item bench">0</div>
+    {% endif %}
+{% endmacro %}
+
+
+
 <body onload="init()">
 
 <div class="header">
@@ -348,6 +397,7 @@
 <div class="bar">
     <div class="bar-centered">
         <button class="bar-item" onclick="openBar(event, 'bench')">Benchmark Results</button>
+        <button class="bar-item" onclick="openBar(event, 'osdstats')">OSD Stats</button>
         <button class="bar-item" onclick="openBar(event, 'status')">Status</button>
         <!-- <button class="bar-item" onclick="openBar(event, 'latency')">Latency</button> -->
     </div>
@@ -400,7 +450,7 @@
                     <div class="item prop">{{ t["storage_class"] }}</div>
                     <div class="item pg">{{ t["storage_class_stats"]["num_pg"] }}</div>
                     <div class="item prop">{{ o["ioengine"] }}</div>
-                    <div class="item prop">{{ o["readwrite"] }} ({{ o["rwmixread"] }}/{{ 100-o["rwmixread"] }})</div>
+                    <div class="item prop">{{ o["readwrite"] }} ({{ o["rwmixread"] }}/{{ 100 - (o["rwmixread"]|int) }})</div>
                     <div class="item prop">{{ o["bs"] }}</div>
                     <div class="item prop">{{ o["iodepth"] }}</div>
                     <div class="item prop">{{ o["size"] }}</div>
@@ -417,35 +467,118 @@
                 </div>
             </td>
         </tr>
+        {% set c = dt["ceph"] %}
         <tr class="collapsable" id="timing_{{ tstripped }}_data"><td colspan=3>
+            <div class="inlineheader">Global READ stats, MB/s vs seconds. Measured maximum is <b>{{ c["max_rbl"][0] | to_mb }}</b> MB/sec</div>
             <div class="bc-wrap">
                 <div class="bctimecol">
-                 <div class="bctime"><span class="bctimetext">110</span></div>
-                 <div class="bctime"><span class="bctimetext">75</span></div>
-                 <div class="bctime"><span class="bctimetext">50</span></div>
-                 <div class="bctime"><span class="bctimetext">15</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_rbl"][1] | to_mb }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_rbl"][2] | to_mb }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_rbl"][3] | to_mb }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_rbl"][4] | to_mb }}</span></div>
                 </div>
                    
                 <div class="bc-container">
                  <div class="bc">
-                  <div class="bccol"><div class="bcbar" style="height: 75%;"></div><div class="bcfooter">2s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 25%;"></div><div class="bcfooter">4s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 55%;"></div><div class="bcfooter">6s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 65%;"></div><div class="bcfooter">8s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 15%;"></div><div class="bcfooter">10s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 16%;"></div><div class="bcfooter">12s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 17%;"></div><div class="bcfooter">14s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 18%;"></div><div class="bcfooter">16s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 19%;"></div><div class="bcfooter">18s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 20%;"></div><div class="bcfooter">20s</div></div>
-                  <div class="bccol"><div class="bcbar" style="height: 21%;"></div><div class="bcfooter">22s</div></div>
+                  {% for sec, c_data in c["stats"].items() %}
+                  {% set elapsed = sec | float %}
+                  <div class="bccol">
+                      {% if sec == c["max_rbl_time"] %}
+                      <div class="bcheader">{{ c_data["read_bytes_sec"] | to_mb }}</div>
+                      <div class="bcbar green-bar" style="height: {{ c_data["read_bytes_sec_perc"] }};"></div>
+                      {% else %}
+                      <div class="bcbar" style="height: {{ c_data["read_bytes_sec_perc"] }};"></div>
+                      {% endif %}
+                      <div class="bcfooter">{{ "%0.1f" | format(elapsed) }}</div>
+                    </div>
+                  {% endfor %}
                  </div>
                 </div>
             </div>
+            <div class="inlineheader">Global READ stats, IOPS vs seconds. Measured maximum is <b>{{ c["max_ril"][0] }}</b> op/sec</div>
+            <div class="bc-wrap">
+                <div class="bctimecol">
+                 <div class="bctime"><span class="bctimetext">{{ c["max_ril"][1] }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_ril"][2] }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_ril"][3] }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_ril"][4] }}</span></div>
+                </div>
+                   
+                <div class="bc-container">
+                 <div class="bc">
+                  {% for sec, c_data in c["stats"].items() %}
+                  {% set elapsed = sec | float %}
+                  <div class="bccol">
+                      {% if sec == c["max_ril_time"] %}
+                      <div class="bcheader">{{ c_data["read_op_per_sec"] }}</div>
+                      <div class="bcbar green-bar" style="height: {{ c_data["read_op_per_sec_perc"] }};"></div>
+                      {% else %}
+                      <div class="bcbar" style="height: {{ c_data["read_op_per_sec_perc"] }};"></div>
+                      {% endif %}
+                      <div class="bcfooter">{{ "%0.1f" | format(elapsed) }}</div>
+                    </div>
+                  {% endfor %}
+                 </div>
+                </div>
+            </div>
+            <div class="inlineheader">Global WRITE stats, MB/s vs seconds. Measured maximum is <b>{{ c["max_wbl"][0] | to_mb }}</b> MB/sec</div>
+            <div class="bc-wrap">
+                <div class="bctimecol">
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wbl"][1] | to_mb }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wbl"][2] | to_mb }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wbl"][3] | to_mb }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wbl"][4] | to_mb }}</span></div>
+                </div>
+                   
+                <div class="bc-container">
+                 <div class="bc">
+                  {% for sec, c_data in c["stats"].items() %}
+                  {% set elapsed = sec | float %}
+                  <div class="bccol">
+                      {% if sec == c["max_wbl_time"] %}
+                      <div class="bcheader">{{ c_data["write_bytes_sec"] | to_mb }}</div>
+                      <div class="bcbar green-bar" style="height: {{ c_data["write_bytes_sec_perc"] }};"></div>
+                      {% else %}
+                      <div class="bcbar" style="height: {{ c_data["write_bytes_sec_perc"] }};"></div>
+                      {% endif %}
+                      <div class="bcfooter">{{ "%0.1f" | format(elapsed) }}</div>
+                    </div>
+                  {% endfor %}
+                 </div>
+                </div>
+            </div>
+
+            <div class="inlineheader">Global WRITE stats, IOPS vs seconds. Measured maximum is <b>{{ c["max_wil"][0] }}</b> op/sec</div>
+            <div class="bc-wrap">
+                <div class="bctimecol">
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wil"][1] }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wil"][2] }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wil"][3] }}</span></div>
+                 <div class="bctime"><span class="bctimetext">{{ c["max_wil"][4] }}</span></div>
+                </div>
+                   
+                <div class="bc-container">
+                 <div class="bc">
+                  {% for sec, c_data in c["stats"].items() %}
+                  {% set elapsed = sec | float %}
+                  <div class="bccol">
+                      {% if sec == c["max_wil_time"] %}
+                      <div class="bcheader">{{ c_data["write_op_per_sec"] }}</div>
+                      <div class="bcbar green-bar" style="height: {{ c_data["write_op_per_sec_perc"] }};"></div>
+                      {% else %}
+                      <div class="bcbar" style="height: {{ c_data["write_op_per_sec_perc"] }};"></div>
+                      {% endif %}
+                      <div class="bcfooter">{{ "%0.1f" | format(elapsed) }}</div>
+                    </div>
+                  {% endfor %}
+                 </div>
+                </div>
+            </div>
+            <div class="inlineheader">Per Agent stats</div>
             <table style="table-layout: auto;"><tbody>
                 {% for agent,ag_result in dt["agents"].items() %}
                 {% set j = ag_result["jobs"][0] %}
-                <tr>
+                <tr class="agents">
                     <td class="status">{{ time }}</td>
                     <td class="status">{{ agent }}</td>
                     <td class="col_properties">
@@ -455,7 +588,7 @@
                             <div class="item prop">{{ t["storage_class"] }}</div>
                             <div class="item pg">{{ t["storage_class_stats"]["num_pg"] }}</div>
                             <div class="item prop">{{ o["ioengine"] }}</div>
-                            <div class="item prop">{{ o["readwrite"] }} ({{ o["rwmixread"] }}/{{ 100-o["rwmixread"] }})</div>
+                            <div class="item prop">{{ o["readwrite"] }} ({{ o["rwmixread"] }}/{{ 100 - (o["rwmixread"]|int) }})</div>
                             <div class="item prop">{{ j["job options"]["bs"] }}</div>
                             <div class="item prop">{{ o["iodepth"] }}</div>
                             <div class="item prop">{{ j["job options"]["size"] }}</div>
@@ -481,6 +614,105 @@
 </div>
 {% endmacro %}
 
+<!-- OSD stats -->
+{% macro osds_page(results, id_label) %}
+<div id="{{ id_label }}" class="barcontent">
+    <h5>{{ caller() }}</h5>
+    <hr>
+    <table class="ceph_status">
+        <tr class="node">
+            <td class="status">Time started</td>
+            <td class="status">Data point</td>
+            <td class="col_properties">
+                <div class="osd_props_group">
+                    <div class="item prop">Status</div>
+                    <div class="item prop">Class</div>
+                    <div class="item prop">Weight</div>
+                    <div class="item pg">PGs</div>
+                </div>
+            </td>
+            <td class="col_bench">
+                <div class="osd_stats_group">
+                    <div class="item bench">Total, GB</div>
+                    <div class="item bench">Avail., GB</div>
+                    <div class="item bench">Used, GB</div>
+                    <div class="item bench">Data, GB</div>
+                    <div class="item bench">OMAP, GB</div>
+                    <div class="item bench">Meta, GB</div>
+                    <div class="item bench">Utilized, %</div>
+                    <div class="item bench">Variance, %</div>
+                </div>
+            </td>
+        </tr>
+        {% for time,dt in results.items() %}
+        {% set b = dt["osd_summary"]["before"] %}
+        {% set a = dt["osd_summary"]["after"] %}
+        {% set s = dt["osd_summary"]["active"] %}
+        {% set tstripped = time | tstrip %}
+        <tr class="node" onclick="toggleClassByID('timing_{{ tstripped }}_osds')" id="timing_{{ tstripped }}_button">
+            <td class="status">{{ time }}</td>
+            <td class="status">Active nodes</td>
+            <td class="col_properties">
+                <div class="osd_props_group">
+                    <div class="item prop">{{ s["status"] }}</div>
+                    <div class="item prop">{{ s["device_class"] }}</div>
+                    <div class="item prop">&minus;</div>
+                    <div class="item pg">{{ s["pgs"] }}</div>
+                </div>
+            </td>
+            <td class="col_bench">
+                <div class="osd_stats_group">
+                    <div class="item bench">{{ a["total_kb"] | to_mb }}</div>
+                    {{ summary_value("kb_avail", s) }}
+                    {{ summary_value("kb_used", s) }}
+                    {{ summary_value("kb_used_data", s) }}
+                    {{ summary_value("kb_used_omap", s) }}
+                    {{ summary_value("kb_used_meta", s) }}
+                    {{ summary_value("utilization", s) }}
+                    <div class="item bench">{{ s["var_down"] }}&darr; / {{ s["var_up"] }}&uarr;</div>
+                </div>
+            </td>
+        </tr>
+        <tr class="collapsable" id="timing_{{ tstripped }}_osds"><td colspan=3>
+            <table style="table-layout: auto;"><tbody>
+                {% for osd in dt["osds"].keys() | sort %}
+                {% set n = dt["osds"][osd] %}
+                {% set b = n["before"] %}
+                {% set a = n["after"] %}
+                {% set p = n["percent"] %}
+                <tr class="agents">
+                    <td class="status">{{ time }}</td>
+                    <td class="status">{{ osd }}</td>
+                    <td class="col_properties">
+                        <div class="osd_props_group">
+                            <div class="item prop">{{ a["status"] }}</div>
+                            <div class="item prop">{{ a["device_class"] }}</div>
+                            <div class="item prop">{{ "%0.4f" | format(a["crush_weight"]|float) }}</div>
+                            <div class="item pg">{{ a["pgs"] }}</div>
+                        </div>
+                    </td>
+                    <td class="col_bench">
+                        <div class="osd_stats_group">
+                            <div class="item bench">{{ a["kb"] | to_mb }}</div>
+                            {{ put_osd_prop("kb_avail", b, a, p) }}
+                            {{ put_osd_prop("kb_used", b, a, p) }}
+                            {{ put_osd_prop("kb_used_data", b, a, p) }}
+                            {{ put_osd_prop("kb_used_omap", b, a, p) }}
+                            {{ put_osd_prop("kb_used_meta", b, a, p) }}
+                            {{ put_osd_perc("utilization", b, a, p) }}
+                            {{ put_osd_perc("var", b, a, p) }}
+                        </div>
+                    </td>
+                </tr>
+                {% endfor %}
+            </tbody></table>
+        </td></tr>
+        {% endfor %}
+    </table>
+</div>
+{% endmacro %}
+
+
 <!-- Status page -->
 {% macro status_page(info, id_label) %}
 <div id="{{ id_label }}" class="barcontent">
@@ -671,6 +903,10 @@
     Benchmark results
 {% endcall %}
 
+{% call osds_page(results, "osdstats") %}
+    OSD nodes stats collected before and after each step
+{% endcall %}
+
 {% call status_page(info, "status") %}
     Cluster status
 {% endcall %}