Cfg-checker Ceph benchmark & info updates and fixes

 - Added collection of Ceph global stats while the benchmark is running
 - Added collection of OSD pg dump data
 - Added a page with active OSD node stats
 - Added --report-only option; Ceph info is still collected

 Fixes:
 - fio-runner uses the scheduled time when reporting errors
 - Proper Ceph PV creation
 - Updated retry decorator timeouts for overloaded environments
 - Created volume size is calculated as size*1.3 so the test data file fits
 - Proper indication of maximum values

 Related-PROD: PROD-36669

Change-Id: Ic518ddbb2ca0915b550e981d0b0fc7084000aa04
diff --git a/cfg_checker/modules/ceph/bench.py b/cfg_checker/modules/ceph/bench.py
index 2eedcfb..0780596 100644
--- a/cfg_checker/modules/ceph/bench.py
+++ b/cfg_checker/modules/ceph/bench.py
@@ -41,6 +41,19 @@
     return {}
 
 
+def _split_vol_size(size):
+    # Split a size string such as "4G" into numeric and unit parts.
+    # Plain character iteration; it is faster than a regex here
+    _s_int = "0"
+    _s_type = ""
+    for ch in size:
+        if ch.isdigit():
+            _s_int += ch
+        else:
+            _s_type += ch
+    return int(_s_int), _s_type
+
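+# Usage sketch (assumed inputs): _split_vol_size("4G") -> (4, "G");
+# _split_vol_size("512M") -> (512, "M")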
+
 class CephBench(object):
     _agent_template = "cfgagent-template.yaml"
 
@@ -69,26 +82,32 @@
 
         self.mode = config.bench_mode
         self.resource_prefix = config.resource_prefix
+
         if config.bench_mode == "tasks":
             self.taskfile = config.bench_task_file
             self.load_tasks(self.taskfile)
-        elif config.bench_mode == "cleanup":
+
+        if config.bench_mode == "cleanup":
             self.cleanup_list = []
             return
 
-        self.storage_class = config.bench_storage_class
-        self.results_dump_path = config.bench_results_dump_path
         self.bench_name = config.bench_name
+        self.results_dump_path = config.bench_results_dump_path
+        self.results = {}
+        self.agent_results = {}
+        self.cleanup_list = []
         self.agent_pods = []
+
+        if config.bench_mode == "report":
+            # report-only mode needs no agents or storage class
+            return
+
+        self.storage_class = config.bench_storage_class
         self.services = []
         # By default,
         # 30 seconds should be enough to send tasks to 3-5 agents
         self.scheduled_delay = 30
 
-        self.cleanup_list = []
-        self.results = {}
-        self.agent_results = {}
-
     def set_ceph_info_class(self, ceph_info):
         self.ceph_info = ceph_info
 
@@ -138,6 +157,19 @@
 
     def prepare_agents(self, options):
         logger_cli.info("# Preparing {} agents".format(self.agent_count))
+        # Increase the volume size a bit so the test data file fits
+        _quantizer = 1.3
+        _v_size, _vol_size_units = _split_vol_size(options['size'])
+        _v_size = round(_v_size * _quantizer)
+        _vol_size = str(_v_size) + _vol_size_units + "i"
+        logger_cli.info(
+            "-> Testfile size: {0}, Volume size: {1} ({0}*{2})".format(
+                options['size'],
+                _vol_size,
+                _quantizer
+            )
+        )
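+        # Worked example (assumed input): options['size'] == "4G"
+        # splits to (4, "G"), round(4 * 1.3) == 5, so the requested
+        # volume size becomes "5Gi"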
+        # Start preparing
         for idx in range(self.agent_count):
             # create pvc/pv and pod
             logger_cli.info("-> creating agent '{:02}'".format(idx))
@@ -146,7 +178,7 @@
                 idx,
                 os.path.split(options["filename"])[0],
                 self.storage_class,
-                options['size'] + 'i',
+                _vol_size,
                 self._agent_template
             )
             # save it to lists
@@ -314,6 +346,8 @@
         _ramptime = _get_seconds(options["ramp_time"])
         # Sum up all timings that we must wait and double it
         _timeout = (self.scheduled_delay + _runtime + _ramptime) * 2
+        # We should have no more than 65 measurements
+        _stats_delay = round((_runtime + _ramptime) / 65)
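+        # e.g. a 600s runtime with a 50s ramp gives round(650 / 65) == 10,
+        # i.e. roughly one Ceph status snapshot every 10 seconds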
         _start = self.next_scheduled_time
         _end = datetime.now(timezone.utc) + timedelta(seconds=_timeout)
         while True:
@@ -331,9 +365,10 @@
                 )
             # Get Ceph status if _start time passed
             _elapsed = (datetime.now(timezone.utc) - _start).total_seconds()
-            if _elapsed > 0:
+            if _elapsed > _stats_delay:
                 logger_cli.info("-> {:.2f}s elapsed".format(_elapsed))
-                self.results[options["scheduled_to"]]["ceph"][_elapsed] = \
+                _sec = "{:0.1f}".format(_elapsed)
+                self.results[options["scheduled_to"]]["ceph"][_sec] = \
                     self.ceph_info.get_cluster_status()
             # Check if agents finished
             finished = [True for _s in _sts.values()
@@ -353,6 +388,8 @@
                 return False
 
     def _do_testrun(self, options):
+        self.results[options["scheduled_to"]]["osd_df_before"] = \
+            self.ceph_info.get_ceph_osd_df()
         # send single to agent
         if not self._send_scheduled_task(options):
             return False
@@ -361,11 +398,15 @@
             return False
         else:
             logger_cli.info("-> Finished testrun. Collecting results...")
+            # get ceph osd stats
+            self.results[options["scheduled_to"]]["osd_df_after"] = \
+                self.ceph_info.get_ceph_osd_df()
             # Get results for each agent
             self.collect_results()
             logger_cli.info("-> Calculating totals and averages")
             self.calculate_totals()
             self.calculate_ceph_stats()
+            self.osd_df_compare(options["scheduled_to"])
             logger_cli.info("-> Dumping results")
             for _time, _d in self.results.items():
                 self.dump_result(
@@ -395,8 +436,6 @@
         # TODO: Ceph status check
         # self._wait_ceph_cooldown()
 
-        _get_df = self.ceph_info.get_ceph_osd_df
-
         # Do benchmark according to mode
         if self.mode == "tasks":
             logger_cli.info(
@@ -408,7 +447,6 @@
             _total_tasks = len(self.tasks)
             for idx in range(_total_tasks):
                 # init time to schedule
-                _osd_df_before = _get_df()
                 _task = self.tasks[idx]
                 logger_cli.info(
                     "-> Starting next task ({}/{})".format(idx+1, _total_tasks)
@@ -427,18 +465,15 @@
                 self.results[_sch_time] = {
                     "input_options": options,
                     "agents": {},
-                    "ceph": {},
-                    "osd_df_before": _osd_df_before
+                    "ceph": {}
                 }
+                # exit on error
                 if not self._do_testrun(options):
                     return False
-                else:
-                    self.results[_sch_time]["osd_df_after"] = _get_df()
-
+                # Ceph OSD stats are saved in _do_testrun; wait for cooldown
                 self.wait_ceph_cooldown()
         elif self.mode == "single":
             logger_cli.info("# Running single benchmark")
-            _osd_df_before = _get_df()
             # init time to schedule
             _sch_time = self._get_next_scheduled_time()
             options["scheduled_to"] = _sch_time
@@ -446,13 +481,11 @@
             self.results[_sch_time] = {
                 "input_options": options,
                 "agents": {},
-                "ceph": {},
-                "osd_df_before": _osd_df_before
+                "ceph": {}
             }
             if not self._do_testrun(options):
                 return False
-            else:
-                self.results[_sch_time]["osd_df_after"] = _get_df()
+            # Ceph OSD stats are saved inside _do_testrun
         else:
             logger_cli.error("ERROR: Unknown mode '{}'".format(self.mode))
             return False
@@ -669,14 +702,14 @@
 
     def calculate_ceph_stats(self):
         # func to get values as lists
-        def _as_list(key, stats):
-            _list = []
-            for _v in stats.values():
-                if key in _v:
-                    _list += [_v[key]]
-                else:
-                    _list += [0]
-            return _list
+        def _get_max_value(key, stats):
+            _max_time = 0
+            _value = 0
+            for _k, _v in stats.items():
+                if key in _v and _value < _v[key]:
+                    _max_time = _k
+                    _value = _v[key]
+            return _max_time, _value
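+        # Example (hypothetical stats): {"5.0": {"read_bytes_sec": 100},
+        # "10.0": {"read_bytes_sec": 300}} -> ("10.0", 300)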
 
         def _perc(n, m):
             if not n:
@@ -684,7 +717,12 @@
             elif not m:
                 return 0
             else:
-                return (n / m) * 100
+                return "{:.0f}%".format((n / m) * 100)
+
+        def _axis_vals(val):
+            return [
+                val, int(val*1.1), int(val*0.75), int(val*0.50), int(val*0.15)
+            ]
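+        # e.g. _axis_vals(200) -> [200, 220, 150, 100, 30], presumably
+        # the reference tick values for the report bar charts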
 
         _stats = {}
         for _time, data in self.results.items():
@@ -701,28 +739,148 @@
             for _e, _d in data["ceph"].items():
                 _stats[_e] = _d["pgmap"]
             # Maximums
-            m_r_bytes = max(_as_list("read_bytes_sec", _stats))
-            m_w_bytes = max(_as_list("write_bytes_sec", _stats))
-            m_r_iops = max(_as_list("read_op_per_sec", _stats))
-            m_w_iops = max(_as_list("write_op_per_sec", _stats))
+            mrb_t, mrb = _get_max_value("read_bytes_sec", _stats)
+            mwb_t, mwb = _get_max_value("write_bytes_sec", _stats)
+            mri_t, mri = _get_max_value("read_op_per_sec", _stats)
+            mwi_t, mwi = _get_max_value("write_op_per_sec", _stats)
             # Replace ceph with shorter data
             data["ceph"] = {
-                "max_read_bytes_sec": m_r_bytes,
-                "max_write_bytes_sec": m_w_bytes,
-                "max_read_iops_sec": m_r_iops,
-                "max_write_iops_sec": m_w_iops,
+                "max_rbl": _axis_vals(mrb),
+                "max_rbl_time": mrb_t,
+                "max_wbl": _axis_vals(mwb),
+                "max_wbl_time": mwb_t,
+                "max_ril": _axis_vals(mri),
+                "max_ril_time": mri_t,
+                "max_wil": _axis_vals(mwi),
+                "max_wil_time": mwi_t,
                 "stats": _stats
             }
             # Calculate %% values for barchart
             for _e, _d in data["ceph"]["stats"].items():
                 _d["read_bytes_sec_perc"] = \
-                    _perc(_d.get("read_bytes_sec", 0), m_r_bytes)
+                    _perc(_d.get("read_bytes_sec", 0), mrb)
                 _d["write_bytes_sec_perc"] = \
-                    _perc(_d.get("write_bytes_sec", 0), m_w_bytes)
+                    _perc(_d.get("write_bytes_sec", 0), mwb)
                 _d["read_op_per_sec_perc"] = \
-                    _perc(_d.get("read_op_per_sec", 0), m_r_iops)
+                    _perc(_d.get("read_op_per_sec", 0), mri)
                 _d["write_op_per_sec_perc"] = \
-                    _perc(_d.get("write_op_per_sec", 0), m_w_iops)
+                    _perc(_d.get("write_op_per_sec", 0), mwi)
+        return
+
+    def osd_df_compare(self, _time):
+        def _get_osd(osd_id, nodes):
+            for osd in nodes:
+                if osd["id"] == osd_id:
+                    return osd
+            return None
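+        # Example (hypothetical nodes list): _get_osd(3, [{"id": 3}])
+        # returns {"id": 3}; returns None when the OSD disappeared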
+
+        logger_cli.info("# Comparing OSD stats")
+        _osd = {}
+        if _time not in self.results:
+            logger_cli.warning(
+                "WARNING: {} not found in results. Check data".format(_time)
+            )
+            return
+        data = self.results[_time]
+        # Save summary
+        data["osd_summary"] = {}
+        data["osd_summary"]["before"] = data["osd_df_before"]["summary"]
+        data["osd_summary"]["after"] = data["osd_df_after"]["summary"]
+        data["osd_summary"]["active"] = {
+            "status": "",
+            "device_class": "",
+            "pgs": "",
+            "kb_used": 0,
+            "kb_used_data": 0,
+            "kb_used_omap": 0,
+            "kb_used_meta": 0,
+            "utilization": 0,
+            "var_down": 0,
+            "var_up": 0
+        }
+        # Compare OSD counts
+        osds_before = len(data["osd_df_before"]["nodes"])
+        osds_after = len(data["osd_df_after"]["nodes"])
+        if osds_before != osds_after:
+            logger_cli.warning(
+                "WARNING: Before/After bench OSD "
+                "count mismatch for '{}'".format(_time)
+            )
+        # iterate osds from before
+        _pgs = 0
+        _classes = set()
+        _nodes_up = 0
+        for idx in range(osds_before):
+            _osd_b = data["osd_df_before"]["nodes"][idx]
+            # search for the same osd in after
+            _osd_a = _get_osd(_osd_b["id"], data["osd_df_after"]["nodes"])
+            # Save data to the new place
+            _osd[_osd_b["name"]] = {}
+            _osd[_osd_b["name"]]["before"] = _osd_b
+            _osd[_osd_b["name"]]["percent"] = {}
+            if not _osd_a:
+                # If this happens, the Ceph cluster is actually broken
+                logger_cli.warning(
+                    "WARNING: Wow! {} disappeared".format(_osd_b["name"])
+                )
+                _osd[_osd_b["name"]]["after"] = {}
+                # nothing to compare or sum up for a missing OSD
+                continue
+            _osd[_osd_b["name"]]["after"] = _osd_a
+            # Calculate summary using "after" data
+            _pgs += _osd_a["pgs"]
+            _classes.update([_osd_a["device_class"]])
+            if _osd_a["status"] == "up":
+                _nodes_up += 1
+            # compare
+            _keys_b = list(_osd_b.keys())
+            _keys_a = list(_osd_a.keys())
+            # To be safe, detect if some keys are different
+            # ...and log it.
+            _diff = set(_keys_b).symmetric_difference(_keys_a)
+            if len(_diff) > 0:
+                # This should never happen, actually
+                logger_cli.warning(
+                    "WARNING: Before/after keys mismatch "
+                    "for OSD node {}: {}".format(idx, ", ".join(_diff))
+                )
+                continue
+            # Compare each key and calculate how it changed
+            for k in _keys_b:
+                if _osd_b[k] != _osd_a[k]:
+                    # Announce change
+                    logger_cli.debug(
+                        "-> {:4}: {}, {} -> {}".format(
+                            idx,
+                            k,
+                            _osd_b[k],
+                            _osd_a[k]
+                        )
+                    )
+                    # skip non-numeric keys (e.g. "status") and zero
+                    # baselines to avoid a division error
+                    if not isinstance(_osd_b[k], (int, float)) \
+                            or not _osd_b[k]:
+                        continue
+                    # calculate percent
+                    _change_perc = (_osd_a[k] / _osd_b[k]) * 100 - 100
+                    _osd[_osd_b["name"]]["percent"][k] = _change_perc
+
+                    # Increase counters
+                    _p = data["osd_summary"]["active"]
+
+                    if k not in _p:
+                        _p[k] = 1
+                    else:
+                        _p[k] += 1
+                    if k == "var":
+                        if _change_perc > 0:
+                            _p["var_up"] += 1
+                        elif _change_perc < 0:
+                            _p["var_down"] += 1
+        # Save sorted data
+        data["osds"] = _osd
+        logger_cli.info("-> Removing redundand osd before/after data")
+        data.pop("osd_df_before")
+        data.pop("osd_df_after")
+        # Save summary
+        data["osd_summary"]["active"]["status"] = "{}".format(_nodes_up)
+        data["osd_summary"]["active"]["device_class"] = \
+            "{}".format(len(list(_classes)))
+        data["osd_summary"]["active"]["pgs"] = _pgs
         return
 
     # Create report