cfg-checker ceph bench module alpha version
- Ceph benchmark report (beta)
- Updated result time choosing. Now results reported based on start time
- New methods for listing
- Cleanup-only mode
- Unified results processing
- Additional ceph info gather
- Experimental barchart graph example
Fixes:
- Kube API client recreated each time for stability (HTTP/WebSocket specifics)
- args naming fixes
Change-Id: Id541f789a00ab4ee827603c5b6f7f07899aaa7c5
diff --git a/cfg_checker/modules/ceph/__init__.py b/cfg_checker/modules/ceph/__init__.py
index 0f1de01..f9bf3ca 100644
--- a/cfg_checker/modules/ceph/__init__.py
+++ b/cfg_checker/modules/ceph/__init__.py
@@ -89,7 +89,7 @@
)
ceph_bench_parser.add_argument(
'--task-file',
- metavar='task-file',
+ metavar='task_file',
help="Task file for benchmark"
)
ceph_bench_parser.add_argument(
@@ -97,6 +97,16 @@
action="store_true", default=False,
help="Do not cleanup services, agents, pvc, and pv"
)
+ ceph_bench_parser.add_argument(
+ '--cleanup-only',
+ action="store_true", default=False,
+ help="Cleanup resources related to benchmark"
+ )
+ ceph_bench_parser.add_argument(
+ '--dump-path',
+ metavar="dump_results", default="/tmp",
+ help="Dump result after each test run to use them later"
+ )
return _parser
@@ -149,8 +159,29 @@
def do_bench(args, config):
# Ceph Benchmark using multiple pods
+ # if only cleanup needed do it and exit
+ _cleanup_only = args_utils.get_arg(args, 'cleanup_only')
+ config.resource_prefix = "cfgagent"
+ if _cleanup_only:
+ # Do forced resource cleanup and exit
+ config.bench_mode = "cleanup"
+ config.bench_agent_count = -1
+ ceph_bench = bench.KubeCephBench(config)
+ logger_cli.info(
+ "# Discovering benchmark resources using prefix of '{}'".format(
+ config.resource_prefix
+ )
+ )
+ ceph_bench.prepare_cleanup()
+ ceph_bench.cleanup()
+ return
+
+ # gather Ceph info
+ logger_cli.info("# Collecting Ceph cluster information")
+ ceph_info = info.KubeCephInfo(config)
+
# Prepare the tasks and do synced testrun or a single one
- logger_cli.info("# Initializing benchmark run")
+ logger_cli.info("# Initializing ceph benchmark module")
args_utils.check_supported_env(ENV_TYPE_KUBE, args, config)
_filename = args_utils.get_arg(args, 'html')
# agents count option
@@ -161,6 +192,17 @@
_storage_class = args_utils.get_arg(args, "storage_class")
logger_cli.info("-> using storage class of '{}'".format(_storage_class))
config.bench_storage_class = _storage_class
+ # dump results options
+ _dump_path = args_utils.get_arg(args, "dump_path")
+ if _dump_path:
+ logger_cli.info("# Results will be dumped to '{}'".format(_dump_path))
+ config.bench_results_dump_path = _dump_path
+ else:
+ logger_cli.info(
+ "# No result dump path set. "
+ "Consider setting it if running long task_file based test runs"
+ )
+ config.bench_results_dump_path = _dump_path
# Task files or options
_task_file = args_utils.get_arg(args, "task_file", nofail=True)
if not _task_file:
@@ -180,12 +222,20 @@
# init the Bench class
ceph_bench = bench.KubeCephBench(config)
+ ceph_bench.set_ceph_info_class(ceph_info)
# Do the testrun
ceph_bench.prepare_agents(_opts)
+ ceph_bench.wait_ceph_cooldown()
+
+ # DEBUG of report in progress
if not ceph_bench.run_benchmark(_opts):
# No cleaning and/or report if benchmark was not finished
logger_cli.info("# Abnormal benchmark run, no cleaning performed")
return
+ # Remove after DEBUG
+ # ceph_bench.collect_results(_opts)
+ # END DEBUG
+
# Cleaning
if not config.no_cleaning_after_benchmark:
ceph_bench.cleanup()