Cfg-checker Ceph benchmark & info updates and fixes

 - Added collection of Ceph global stats while the benchmark is running
 - Added collection of OSD pg dump data (see the sketch after this list)
 - Added a report page with active OSD node stats
 - Added --report-only option; Ceph info is still collected
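
 A rough sketch of what the OSD pg dump collection could look like (the
 toolbox pod name, namespace and kubectl invocation below are assumptions
 for illustration; only 'ceph pg dump --format json' is standard Ceph CLI,
 and the module itself talks to the cluster through its own Kubernetes
 client):

    import json
    import subprocess

    def collect_pg_dump(pod="rook-ceph-tools", namespace="rook-ceph"):
        # Hypothetical helper: fetch 'ceph pg dump' as JSON through a
        # toolbox pod; the pod and namespace names are placeholders.
        out = subprocess.check_output([
            "kubectl", "-n", namespace, "exec", pod, "--",
            "ceph", "pg", "dump", "--format", "json",
        ])
        return json.loads(out)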

 Fixes:
 - fio-runner now uses the scheduled time when reporting errors
 - Proper Ceph PV creation
 - Updated retry decorator timeouts for overloaded environments
 - Benchmark volume size is now calculated as size*1.3 (see the sketch
   after this list)
 - Proper indication of maximum values
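
 The retry and volume size fixes as a standalone sketch (the helper and
 decorator names, the default timeout values and the rounding to whole
 GiB are assumptions; only the size*1.3 rule and the relaxed-timeout
 intent come from this change):

    import math
    import time
    from functools import wraps

    def calc_volume_size_gb(requested_gb, overhead=1.3):
        # Pad the requested benchmark volume by ~30% so the fio data set
        # plus filesystem overhead still fits; round up to whole GiB.
        return math.ceil(requested_gb * overhead)

    assert calc_volume_size_gb(50) == 65  # 50 GiB test data -> 65 GiB PV

    def retry(tries=5, delay=15, backoff=2):
        # Retry decorator with delays generous enough for overloaded
        # environments; the real module's defaults may differ.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                _delay = delay
                for attempt in range(1, tries + 1):
                    try:
                        return func(*args, **kwargs)
                    except Exception:
                        if attempt == tries:
                            raise
                        time.sleep(_delay)
                        _delay *= backoff
            return wrapper
        return decorator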

 Related-PROD: PROD-36669

Change-Id: Ic518ddbb2ca0915b550e981d0b0fc7084000aa04
diff --git a/cfg_checker/modules/ceph/__init__.py b/cfg_checker/modules/ceph/__init__.py
index dd483cf..eee01ce 100644
--- a/cfg_checker/modules/ceph/__init__.py
+++ b/cfg_checker/modules/ceph/__init__.py
@@ -109,8 +109,13 @@
         help="Cleanup resources related to benchmark"
     )
     ceph_bench_parser.add_argument(
+        '--report-only',
+        action="store_true", default=False,
+        help="Only create a report from result files already in the dump folder"
+    )
+    ceph_bench_parser.add_argument(
         '--dump-path',
-        metavar="dump_results", default="/tmp",
+        metavar="dump_results",
         help="Dump result after each test run to use them later"
     )
     ceph_bench_parser.add_argument(
@@ -217,6 +222,7 @@
     # Ceph Benchmark using multiple pods
     # if only cleanup needed do it and exit
     _cleanup_only = args_utils.get_arg(args, 'cleanup_only')
+    _report_only = args_utils.get_arg(args, 'report_only')
     config.resource_prefix = "cfgagent"
     if _cleanup_only:
         # Do forced resource cleanup and exit
@@ -232,15 +238,57 @@
         ceph_bench.cleanup()
         return
 
+    # dump results options
+    _dump_path = args_utils.get_arg(args, "dump_path")
+    if _dump_path:
+        logger_cli.info("# Results will be dumped to '{}'".format(_dump_path))
+        config.bench_results_dump_path = _dump_path
+    else:
+        _p = "/tmp"
+        logger_cli.info(
+            "# No result dump path set. Defaulting to '{}'. "
+            "Consider setting it if running long task_file "
+            "based test runs".format(_p)
+        )
+        config.bench_results_dump_path = _p
+
+    # Report filename
+    _filename = args_utils.get_arg(args, 'html')
     # gather Ceph info
     logger_cli.info("# Collecting Ceph cluster information")
     ceph_info = info.KubeCephInfo(config)
 
+    # Task files or options
+    _opts = get_fio_options()
+    # Load name and announce it
+    config.bench_name = args_utils.get_arg(args, "name")
+    _opts["name"] = config.bench_name
+    logger_cli.info(
+        "# Using '{}' as ceph bench jobs name".format(_opts["name"])
+    )
+
+    if _report_only:
+        # Do forced report creation and exit
+        config.bench_mode = "report"
+        config.bench_agent_count = -1
+        ceph_bench = bench.KubeCephBench(config)
+        ceph_bench.set_ceph_info_class(ceph_info)
+        logger_cli.info(
+            "# Preparing to generate report '{}'".format(
+                config.resource_prefix
+            )
+        )
+        # Preload previous results for this name
+        ceph_bench.preload_results()
+        # Gather ceph data
+        ceph_bench.wait_ceph_cooldown()
+        # Generate report
+        ceph_bench.create_report(_filename)
+        return
+
     # Prepare the tasks and do synced testrun or a single one
     logger_cli.info("# Initializing ceph benchmark module")
     args_utils.check_supported_env(ENV_TYPE_KUBE, args, config)
-    # Report filename
-    _filename = args_utils.get_arg(args, 'html')
     # agents count option
     config.bench_agent_count = args_utils.get_arg(args, "agents")
     logger_cli.info("-> using {} agents".format(config.bench_agent_count))
@@ -250,8 +298,6 @@
     _storage_class = args_utils.get_arg(args, "storage_class")
     logger_cli.info("-> using storage class of '{}'".format(_storage_class))
     config.bench_storage_class = _storage_class
-    # dump results options
-    _dump_path = args_utils.get_arg(args, "dump_path")
     if _dump_path:
         logger_cli.info("# Results will be dumped to '{}'".format(_dump_path))
         config.bench_results_dump_path = _dump_path
@@ -261,8 +307,7 @@
             "Consider setting it if running long task_file based test runs"
         )
         config.bench_results_dump_path = _dump_path
-    # Task files or options
-    _opts = get_fio_options()
+
     _task_file = args_utils.get_arg(args, "task_file", nofail=True)
     if not _task_file:
         logger_cli.info("-> Running single benchmark run")
@@ -289,18 +334,11 @@
         logger_cli.info("-> running with tasks from '{}'".format(_task_file))
         config.bench_task_file = _task_file
         config.bench_mode = "tasks"
-    config.bench_name = args_utils.get_arg(args, "name")
-    _opts["name"] = config.bench_name
-    logger_cli.info(
-        "# Using '{}' as ceph bench jobs name".format(_opts["name"])
-    )
     logger_cli.debug("... default/selected options for fio:")
     for _k in _opts.keys():
         # TODO: Update options for single run
         logger_cli.debug("    {} = {}".format(_k, _opts[_k]))
 
-    # handle option inavailability from command line for single mode
-
     # init the Bench class
     ceph_bench = bench.KubeCephBench(config)
     ceph_bench.set_ceph_info_class(ceph_info)