single node tests
diff --git a/tests/disk_test_agent.py b/tests/disk_test_agent.py
index 7537438..4f23f90 100644
--- a/tests/disk_test_agent.py
+++ b/tests/disk_test_agent.py
@@ -1,6 +1,7 @@
import sys
import time
import json
+import random
import select
import pprint
import argparse
@@ -48,35 +49,36 @@
repeat = 1
# this code can be optimized
- for i in range(repeat):
- iterable_names = []
- iterable_values = []
- processed_vals = {}
+ iterable_names = []
+ iterable_values = []
+ processed_vals = {}
- for val_name, val in vals.items():
- if val is None:
- processed_vals[val_name] = val
- # remove hardcode
- elif val.startswith('{%'):
- assert val.endswith("%}")
- content = val[2:-2].format(**params)
- iterable_names.append(val_name)
- iterable_values.append(i.strip() for i in content.split(','))
- else:
- processed_vals[val_name] = val.format(**params)
+ for val_name, val in vals.items():
+ if val is None:
+ processed_vals[val_name] = val
+ # remove hardcode
+ elif val.startswith('{%'):
+ assert val.endswith("%}")
+ content = val[2:-2].format(**params)
+ iterable_names.append(val_name)
+ iterable_values.append(list(i.strip() for i in content.split(',')))
+ else:
+ processed_vals[val_name] = val.format(**params)
- if iterable_values == []:
+ if iterable_values == []:
+ params['UNIQ'] = 'UN{0}'.format(counter[0])
+ counter[0] += 1
+ params['TEST_SUMM'] = get_test_summary(processed_vals)
+ for i in range(repeat):
+ yield name.format(**params), processed_vals
+ else:
+ for it_vals in itertools.product(*iterable_values):
+ processed_vals.update(dict(zip(iterable_names, it_vals)))
params['UNIQ'] = 'UN{0}'.format(counter[0])
counter[0] += 1
params['TEST_SUMM'] = get_test_summary(processed_vals)
- yield name.format(**params), processed_vals
- else:
- for it_vals in itertools.product(*iterable_values):
- processed_vals.update(dict(zip(iterable_names, it_vals)))
- params['UNIQ'] = 'UN{0}'.format(counter[0])
- counter[0] += 1
- params['TEST_SUMM'] = get_test_summary(processed_vals)
- yield name.format(**params), processed_vals
+ for i in range(repeat):
+ yield name.format(**params), processed_vals.copy()
def calculate_execution_time(combinations):
@@ -174,6 +176,129 @@
return res
+count = 0
+
+
+def to_bytes(sz):
+ sz = sz.lower()
+ try:
+ return int(sz)
+ except ValueError:
+ if sz[-1] == 'm':
+ return (1024 ** 2) * int(sz[:-1])
+ if sz[-1] == 'k':
+ return 1024 * int(sz[:-1])
+ raise
+
+
+def estimate_iops(sz, bw, lat):
+ return 1 / (lat + float(sz) / bw)
+
+
+def do_run_fio_fake(bconf):
+ global count
+ count += 1
+ parsed_out = []
+
+ BW = 120.0 * (1024 ** 2)
+ LAT = 0.003
+
+ for name, cfg in bconf:
+ sz = to_bytes(cfg['blocksize'])
+ curr_lat = LAT * ((random.random() - 0.5) * 0.1 + 1)
+ curr_ulat = curr_lat * 1000000
+ curr_bw = BW * ((random.random() - 0.5) * 0.1 + 1)
+ iops = estimate_iops(sz, curr_bw, curr_lat)
+ bw = iops * sz
+
+ res = {'ctx': 10683,
+ 'error': 0,
+ 'groupid': 0,
+ 'jobname': name,
+ 'majf': 0,
+ 'minf': 30,
+ 'read': {'bw': 0,
+ 'bw_agg': 0.0,
+ 'bw_dev': 0.0,
+ 'bw_max': 0,
+ 'bw_mean': 0.0,
+ 'bw_min': 0,
+ 'clat': {'max': 0,
+ 'mean': 0.0,
+ 'min': 0,
+ 'stddev': 0.0},
+ 'io_bytes': 0,
+ 'iops': 0,
+ 'lat': {'max': 0, 'mean': 0.0,
+ 'min': 0, 'stddev': 0.0},
+ 'runtime': 0,
+ 'slat': {'max': 0, 'mean': 0.0,
+ 'min': 0, 'stddev': 0.0}
+ },
+ 'sys_cpu': 0.64,
+ 'trim': {'bw': 0,
+ 'bw_agg': 0.0,
+ 'bw_dev': 0.0,
+ 'bw_max': 0,
+ 'bw_mean': 0.0,
+ 'bw_min': 0,
+ 'clat': {'max': 0,
+ 'mean': 0.0,
+ 'min': 0,
+ 'stddev': 0.0},
+ 'io_bytes': 0,
+ 'iops': 0,
+ 'lat': {'max': 0, 'mean': 0.0,
+ 'min': 0, 'stddev': 0.0},
+ 'runtime': 0,
+ 'slat': {'max': 0, 'mean': 0.0,
+ 'min': 0, 'stddev': 0.0}
+ },
+ 'usr_cpu': 0.23,
+ 'write': {'bw': 0,
+ 'bw_agg': 0,
+ 'bw_dev': 0,
+ 'bw_max': 0,
+ 'bw_mean': 0,
+ 'bw_min': 0,
+ 'clat': {'max': 0, 'mean': 0,
+ 'min': 0, 'stddev': 0},
+ 'io_bytes': 0,
+ 'iops': 0,
+ 'lat': {'max': 0, 'mean': 0,
+ 'min': 0, 'stddev': 0},
+ 'runtime': 0,
+ 'slat': {'max': 0, 'mean': 0.0,
+ 'min': 0, 'stddev': 0.0}
+ }
+ }
+
+ if cfg['rw'] in ('read', 'randread'):
+ key = 'read'
+ elif cfg['rw'] in ('write', 'randwrite'):
+ key = 'write'
+ else:
+            raise ValueError("Unknown op type {0}".format(cfg['rw']))
+
+ res[key]['bw'] = bw
+ res[key]['iops'] = iops
+ res[key]['runtime'] = 30
+ res[key]['io_bytes'] = res[key]['runtime'] * bw
+ res[key]['bw_agg'] = bw
+ res[key]['bw_dev'] = bw / 30
+ res[key]['bw_max'] = bw * 1.5
+ res[key]['bw_min'] = bw / 1.5
+ res[key]['bw_mean'] = bw
+ res[key]['clat'] = {'max': curr_ulat * 10, 'mean': curr_ulat,
+ 'min': curr_ulat / 2, 'stddev': curr_ulat}
+ res[key]['lat'] = res[key]['clat'].copy()
+ res[key]['slat'] = res[key]['clat'].copy()
+
+ parsed_out.append(res)
+
+ return zip(parsed_out, bconf)
+
+
def do_run_fio(bconf):
benchmark_config = format_fio_config(bconf)
cmd = ["fio", "--output-format=json", "-"]
@@ -192,7 +317,6 @@
return zip(parsed_out, bconf)
-
# limited by fio
MAX_JOBS = 1000
@@ -283,7 +407,8 @@
params,
runcycle=None,
raw_results_func=None,
- skip_tests=0):
+ skip_tests=0,
+ fake_fio=False):
whole_conf = list(parse_fio_config_full(benchmark_config, params))
whole_conf = whole_conf[skip_tests:]
@@ -292,7 +417,12 @@
execited_tests = 0
try:
for bconf in next_test_portion(whole_conf, runcycle):
- res_cfg_it = do_run_fio(bconf)
+
+ if fake_fio:
+ res_cfg_it = do_run_fio_fake(bconf)
+ else:
+ res_cfg_it = do_run_fio(bconf)
+
res_cfg_it = enumerate(res_cfg_it, curr_test_num)
for curr_test_num, (job_output, (jname, jconfig)) in res_cfg_it:
@@ -301,7 +431,8 @@
raw_results_func(curr_test_num,
[job_output, jname, jconfig])
- assert jname == job_output["jobname"]
+ assert jname == job_output["jobname"], \
+ "{0} != {1}".format(jname, job_output["jobname"])
if jname.startswith('_'):
continue
@@ -348,6 +479,8 @@
default=False, help="Output raw input and results")
parser.add_argument("--skip-tests", type=int, default=0, metavar="NUM",
help="Skip NUM tests")
+ parser.add_argument("--faked-fio", action='store_true',
+ default=False, help="Emulate fio with 0 test time")
parser.add_argument("--params", nargs="*", metavar="PARAM=VAL",
default=[],
help="Provide set of pairs PARAM=VAL to" +
@@ -435,7 +568,8 @@
params,
argv_obj.runcycle,
rrfunc,
- argv_obj.skip_tests)
+ argv_obj.skip_tests,
+ argv_obj.faked_fio)
etime = time.time()
res = {'__meta__': {'raw_cfg': job_cfg}, 'res': job_res}
diff --git a/tests/io_scenario_check_assumptions.cfg b/tests/io_scenario_check_assumptions.cfg
deleted file mode 100644
index 25d99bc..0000000
--- a/tests/io_scenario_check_assumptions.cfg
+++ /dev/null
@@ -1,59 +0,0 @@
-[defaults]
-NUM_ROUNDS=7
-
-ramp_time=5
-buffered=0
-wait_for_previous
-filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin
-iodepth=1
-size=10Gb
-time_based
-runtime=30
-
-# ---------------------------------------------------------------------
-# check test time, no warmup time. iops = func(rune_time)
-# ---------------------------------------------------------------------
-[runtime_test_wo_wu_{TEST_SUMM}_{UNIQ} * {NUM_ROUNDS}]
-startdelay=10
-blocksize={% 4k, 1m %}
-rw=randwrite
-direct=1
-ramp_time=0
-runtime={% 10, 15, 20, 30, 60, 120 %}
-
-# ---------------------------------------------------------------------
-# check test time, with warmup time. iops = func(rune_time)
-# ---------------------------------------------------------------------
-[runtime_test_w_wu_{TEST_SUMM}_{UNIQ} * {NUM_ROUNDS}]
-startdelay=10
-blocksize={% 4k, 1m %}
-rw=randwrite
-direct=1
-ramp_time={% 5, 10, 15 %}
-runtime={% 15, 30 %}
-
-# ---------------------------------------------------------------------
-# check read and write linearity. oper_time = func(size)
-# ---------------------------------------------------------------------
-[linearity_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize={% 512, 1k, 4k, 8k, 16k, 32k, 64k, 128k, 256k, 512k, 1m, 2m, 4m %}
-rw={% randwrite, randread %}
-direct=1
-
-# ---------------------------------------------------------------------
-# check sync write linearity. oper_time = func(size)
-# check sync BW as well
-# ---------------------------------------------------------------------
-[linearity_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize={% 512, 1k, 4k, 8k, 16k, 32k, 64k, 128k, 256k, 512k, 1m, 2m, 4m %}
-rw=randread
-sync=1
-
-# ---------------------------------------------------------------------
-# check different thread count. (latency, bw) = func(th_count)
-# ---------------------------------------------------------------------
-[concurrence_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize={% 4k, 1m %}
-rw=randwrite
-direct=1
-numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
diff --git a/tests/io_scenario_check_linearity.cfg b/tests/io_scenario_check_linearity.cfg
new file mode 100644
index 0000000..4017cf3
--- /dev/null
+++ b/tests/io_scenario_check_linearity.cfg
@@ -0,0 +1,29 @@
+[defaults]
+NUM_ROUNDS=7
+
+ramp_time=5
+buffered=0
+wait_for_previous
+filename={FILENAME}
+iodepth=1
+size=10Gb
+time_based
+runtime=30
+
+# ---------------------------------------------------------------------
+# check read and write linearity. oper_time = func(size)
+# ---------------------------------------------------------------------
+[linearity_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 512, 1k, 4k, 8k, 16k, 32k, 64k, 128k, 256k, 512k, 1m, 2m, 4m %}
+rw={% randwrite, randread %}
+direct=1
+
+# ---------------------------------------------------------------------
+# check sync write linearity. oper_time = func(size)
+# check sync BW as well
+# ---------------------------------------------------------------------
+[linearity_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 512, 1k, 4k, 8k, 16k, 32k, 64k, 128k, 256k, 512k, 1m, 2m, 4m %}
+rw=randwrite
+sync=1
+
diff --git a/tests/io_scenario_check_th_count.cfg b/tests/io_scenario_check_th_count.cfg
new file mode 100644
index 0000000..bc30c1c
--- /dev/null
+++ b/tests/io_scenario_check_th_count.cfg
@@ -0,0 +1,21 @@
+[defaults]
+NUM_ROUNDS=7
+
+ramp_time=5
+buffered=0
+wait_for_previous
+filename={FILENAME}
+iodepth=1
+size=10Gb
+time_based
+runtime=30
+
+# ---------------------------------------------------------------------
+# check different thread count. (latency, bw) = func(th_count)
+# ---------------------------------------------------------------------
+[concurrence_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 4k, 1m %}
+rw=randwrite
+direct=1
+numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
+
diff --git a/tests/io_scenario_check_warmup.cfg b/tests/io_scenario_check_warmup.cfg
new file mode 100644
index 0000000..6a9c622
--- /dev/null
+++ b/tests/io_scenario_check_warmup.cfg
@@ -0,0 +1,33 @@
+[defaults]
+NUM_ROUNDS=7
+
+ramp_time=5
+buffered=0
+wait_for_previous
+filename={FILENAME}
+iodepth=1
+size=10Gb
+time_based
+runtime=30
+
+# ---------------------------------------------------------------------
+# check test time, no warmup time. iops = func(run_time)
+# ---------------------------------------------------------------------
+[runtime_test_wo_wu_{TEST_SUMM}_{UNIQ} * {NUM_ROUNDS}]
+startdelay=10
+blocksize={% 4k, 1m %}
+rw=randwrite
+direct=1
+ramp_time=0
+runtime={% 10, 15, 20, 30, 60, 120 %}
+
+# ---------------------------------------------------------------------
+# check test time, with warmup time. iops = func(run_time)
+# ---------------------------------------------------------------------
+[runtime_test_w_wu_{TEST_SUMM}_{UNIQ} * {NUM_ROUNDS}]
+startdelay=10
+blocksize={% 4k, 1m %}
+rw=randwrite
+direct=1
+ramp_time={% 5, 10, 15 %}
+runtime={% 15, 30 %}