fix multithreaded bugs in disk_test_agent, add linearity data and script
diff --git a/tests/disk_test_agent.py b/tests/disk_test_agent.py
index 4f23f90..d626889 100644
--- a/tests/disk_test_agent.py
+++ b/tests/disk_test_agent.py
@@ -1,3 +1,4 @@
+import re
import sys
import time
import json
@@ -65,18 +66,29 @@
else:
processed_vals[val_name] = val.format(**params)
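+    # fio emits one result per thread unless group_reporting is set, so
+    # require it whenever numjobs != 1 to keep a single result per test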
+ group_report_err_msg = "Group reporting should be set if numjobs != 1"
+
if iterable_values == []:
params['UNIQ'] = 'UN{0}'.format(counter[0])
counter[0] += 1
params['TEST_SUMM'] = get_test_summary(processed_vals)
+
+ if processed_vals.get('numjobs', '1') != '1':
+ assert 'group_reporting' in processed_vals, group_report_err_msg
+
for i in range(repeat):
- yield name.format(**params), processed_vals
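+            # yield a private copy: the same dict is reused and mutated on
+            # later iterations, so sharing it would corrupt earlier results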
+ yield name.format(**params), processed_vals.copy()
else:
for it_vals in itertools.product(*iterable_values):
processed_vals.update(dict(zip(iterable_names, it_vals)))
params['UNIQ'] = 'UN{0}'.format(counter[0])
counter[0] += 1
params['TEST_SUMM'] = get_test_summary(processed_vals)
+
+ if processed_vals.get('numjobs', '1') != '1':
+ assert 'group_reporting' in processed_vals,\
+ group_report_err_msg
+
for i in range(repeat):
yield name.format(**params), processed_vals.copy()
@@ -372,7 +384,7 @@
j_res["direct_io"] = jconfig.get("direct", "0") == "1"
j_res["sync"] = jconfig.get("sync", "0") == "1"
j_res["concurence"] = int(jconfig.get("numjobs", 1))
- j_res["size"] = jconfig["size"]
+ j_res["blocksize"] = jconfig["blocksize"]
j_res["jobname"] = job_output["jobname"]
j_res["timings"] = (jconfig.get("runtime"),
jconfig.get("ramp_time"))
@@ -385,7 +397,7 @@
assert j_res["sync"] == (jconfig.get("sync", "0") == "1")
assert j_res["concurence"] == int(jconfig.get("numjobs", 1))
- assert j_res["size"] == jconfig["size"]
+ assert j_res["blocksize"] == jconfig["blocksize"]
assert j_res["jobname"] == job_output["jobname"]
assert j_res["timings"] == (jconfig.get("runtime"),
jconfig.get("ramp_time"))
@@ -414,7 +426,7 @@
whole_conf = whole_conf[skip_tests:]
res = {}
curr_test_num = skip_tests
- execited_tests = 0
+ executed_tests = 0
try:
for bconf in next_test_portion(whole_conf, runcycle):
@@ -426,9 +438,9 @@
res_cfg_it = enumerate(res_cfg_it, curr_test_num)
for curr_test_num, (job_output, (jname, jconfig)) in res_cfg_it:
- execited_tests += 1
+ executed_tests += 1
if raw_results_func is not None:
- raw_results_func(curr_test_num,
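+                    # number results by the executed-test counter rather than
+                    # curr_test_num, which is offset by skip_tests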
+ raw_results_func(executed_tests,
[job_output, jname, jconfig])
assert jname == job_output["jobname"], \
@@ -445,7 +457,7 @@
except Exception:
traceback.print_exc()
- return res, execited_tests
+ return res, executed_tests
def run_benchmark(binary_tp, *argv, **kwargs):
@@ -454,6 +466,26 @@
raise ValueError("Unknown behcnmark {0}".format(binary_tp))
+def parse_output(out_err):
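+    # The agent frames each payload in its combined stdout/stderr with
+    # "== RESULTS(format=...) ==" / "== END OF RESULTS ==" marker lines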
+ start_patt = r"(?ims)=+\s+RESULTS\(format=json\)\s+=+"
+ end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"
+
+ for block in re.split(start_patt, out_err)[1:]:
+ data, garbage = re.split(end_patt, block)
+ yield json.loads(data.strip())
+
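+    # format=eval blocks hold Python literals; eval() assumes trusted agent
+    # output (ast.literal_eval would be a safer alternative)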
+ start_patt = r"(?ims)=+\s+RESULTS\(format=eval\)\s+=+"
+ end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"
+
+ for block in re.split(start_patt, out_err)[1:]:
+ data, garbage = re.split(end_patt, block)
+ yield eval(data.strip())
+
+
def parse_args(argv):
parser = argparse.ArgumentParser(
description="Run fio' and return result")