pre-release bug fixes
diff --git a/tests/disk_test_agent.py b/tests/disk_test_agent.py
index 5de3038..b129175 100644
--- a/tests/disk_test_agent.py
+++ b/tests/disk_test_agent.py
@@ -76,8 +76,15 @@
if processed_vals.get('numjobs', '1') != '1':
assert 'group_reporting' in processed_vals, group_report_err_msg
+ ramp_time = processed_vals.get('ramp_time')
for i in range(repeat):
yield name.format(**params), processed_vals.copy()
+
+ if 'ramp_time' in processed_vals:
+ del processed_vals['ramp_time']
+
+ if ramp_time is not None:
+ processed_vals['ramp_time'] = ramp_time
else:
for it_vals in itertools.product(*iterable_values):
processed_vals.update(dict(zip(iterable_names, it_vals)))
@@ -377,6 +384,20 @@
yield bconf
+def get_test_sync_mode(jconfig):
+ is_sync = jconfig.get("sync", "0") == "1"
+ is_direct = jconfig.get("direct_io", "0") == "1"
+
+ if is_sync and is_direct:
+ return 'sd'
+ elif is_sync:
+ return 's'
+ elif is_direct:
+ return 'd'
+ else:
+ return 'a'
+
+
def add_job_results(jname, job_output, jconfig, res):
if job_output['write']['iops'] != 0:
raw_result = job_output['write']
@@ -386,8 +407,7 @@
if jname not in res:
j_res = {}
j_res["action"] = jconfig["rw"]
- j_res["direct_io"] = jconfig.get("direct", "0") == "1"
- j_res["sync"] = jconfig.get("sync", "0") == "1"
+ j_res["sync_mode"] = get_test_sync_mode(jconfig)
j_res["concurence"] = int(jconfig.get("numjobs", 1))
j_res["blocksize"] = jconfig["blocksize"]
j_res["jobname"] = job_output["jobname"]
@@ -396,22 +416,21 @@
else:
j_res = res[jname]
assert j_res["action"] == jconfig["rw"]
-
- assert j_res["direct_io"] == \
- (jconfig.get("direct", "0") == "1")
-
- assert j_res["sync"] == (jconfig.get("sync", "0") == "1")
+ assert j_res["sync_mode"] == get_test_sync_mode(jconfig)
assert j_res["concurence"] == int(jconfig.get("numjobs", 1))
assert j_res["blocksize"] == jconfig["blocksize"]
assert j_res["jobname"] == job_output["jobname"]
- assert j_res["timings"] == (jconfig.get("runtime"),
- jconfig.get("ramp_time"))
+
+ # ramp part is skipped for all tests, except first
+ # assert j_res["timings"] == (jconfig.get("runtime"),
+ # jconfig.get("ramp_time"))
def j_app(name, x):
j_res.setdefault(name, []).append(x)
# 'bw_dev bw_mean bw_max bw_min'.split()
- j_app("bw_mean", raw_result["bw_mean"])
+ # probably fix fio bug - iops is scaled to joncount, but bw - isn't
+ j_app("bw_mean", raw_result["bw_mean"] * j_res["concurence"])
j_app("iops", raw_result["iops"])
j_app("lat", raw_result["lat"]["mean"])
j_app("clat", raw_result["clat"]["mean"])
@@ -457,7 +476,7 @@
add_job_results(jname, job_output, jconfig, res)
except (SystemExit, KeyboardInterrupt):
- pass
+ raise
except Exception:
traceback.print_exc()
@@ -471,6 +490,37 @@
raise ValueError("Unknown behcnmark {0}".format(binary_tp))
+def read_config(fd, timeout=10):
+ job_cfg = ""
+ etime = time.time() + timeout
+ while True:
+ wtime = etime - time.time()
+ if wtime <= 0:
+ raise IOError("No config provided")
+
+ r, w, x = select.select([fd], [], [], wtime)
+ if len(r) == 0:
+ raise IOError("No config provided")
+
+ char = fd.read(1)
+ if '' == char:
+ return job_cfg
+
+ job_cfg += char
+
+
+def estimate_cfg(job_cfg, params):
+ bconf = list(parse_fio_config_full(job_cfg, params))
+ return calculate_execution_time(bconf)
+
+
+def sec_to_str(seconds):
+ h = seconds // 3600
+ m = (seconds % 3600) // 60
+ s = seconds % 60
+ return "{0}:{1:02d}:{2:02d}".format(h, m, s)
+
+
def parse_args(argv):
parser = argparse.ArgumentParser(
description="Run fio' and return result")
@@ -506,25 +556,6 @@
return parser.parse_args(argv)
-def read_config(fd, timeout=10):
- job_cfg = ""
- etime = time.time() + timeout
- while True:
- wtime = etime - time.time()
- if wtime <= 0:
- raise IOError("No config provided")
-
- r, w, x = select.select([fd], [], [], wtime)
- if len(r) == 0:
- raise IOError("No config provided")
-
- char = fd.read(1)
- if '' == char:
- return job_cfg
-
- job_cfg += char
-
-
def main(argv):
argv_obj = parse_args(argv)
@@ -544,7 +575,11 @@
name, val = param_val.split("=", 1)
params[name] = val
- if argv_obj.num_tests or argv_obj.compile or argv_obj.estimate:
+ if argv_obj.estimate:
+ print sec_to_str(estimate_cfg(job_cfg, params))
+ return 0
+
+ if argv_obj.num_tests or argv_obj.compile:
bconf = list(parse_fio_config_full(job_cfg, params))
bconf = bconf[argv_obj.skip_tests:]
@@ -555,14 +590,6 @@
if argv_obj.num_tests:
print len(bconf)
- if argv_obj.estimate:
- seconds = calculate_execution_time(bconf)
-
- h = seconds // 3600
- m = (seconds % 3600) // 60
- s = seconds % 60
-
- print "{0}:{1}:{2}".format(h, m, s)
return 0
if argv_obj.start_at is not None:
@@ -589,11 +616,11 @@
argv_obj.faked_fio)
etime = time.time()
- res = {'__meta__': {'raw_cfg': job_cfg}, 'res': job_res}
+ res = {'__meta__': {'raw_cfg': job_cfg, 'params': params}, 'res': job_res}
oformat = 'json' if argv_obj.json else 'eval'
- out_fd.write("\nRun {} tests in {} seconds\n".format(num_tests,
- int(etime - stime)))
+ out_fd.write("\nRun {0} tests in {1} seconds\n".format(num_tests,
+ int(etime - stime)))
out_fd.write("========= RESULTS(format={0}) =========\n".format(oformat))
if argv_obj.json:
out_fd.write(json.dumps(res))
diff --git a/tests/io_scenario_check_th_count.cfg b/tests/io_scenario_check_th_count.cfg
index 478439e..3d57154 100644
--- a/tests/io_scenario_check_th_count.cfg
+++ b/tests/io_scenario_check_th_count.cfg
@@ -9,19 +9,38 @@
time_based
runtime=30
group_reporting
+numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
# ---------------------------------------------------------------------
# check different thread count. (latency, bw) = func(th_count)
+#
+# RANDOM R IOPS, DIRECT, should act same as AS (4k + randread + sync)
+# just faster. Not sure, that we need it
+# 4k + randread + direct
+#
+# RANDOM R/W IOPS
+# 4k + randread + sync
+# 4k + randwrite + sync
+#
+# LINEAR BW
+# 1m + write + direct
+# 1m + read + direct
+#
# ---------------------------------------------------------------------
[concurrence_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize={% 4k, 1m %}
-rw={% randwrite, randread %}
+blocksize=4k
+rw={% randread %}
direct=1
-numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
+sync=0
[concurrence_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize={% 4k, 1m %}
+blocksize=4k
rw=randwrite
direct=0
sync=1
-numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
+
+[concurrence_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize=1m
+rw={% write, read %}
+direct=1
+sync=0
diff --git a/tests/io_scenario_check_vm_count_ec2.cfg b/tests/io_scenario_check_vm_count_ec2.cfg
new file mode 100644
index 0000000..19c9e50
--- /dev/null
+++ b/tests/io_scenario_check_vm_count_ec2.cfg
@@ -0,0 +1,29 @@
+[defaults]
+NUM_ROUNDS=7
+ramp_time=5
+buffered=0
+wait_for_previous
+filename={FILENAME}
+iodepth=1
+size=10Gb
+time_based
+runtime=30
+group_reporting
+rate={BW_LIMIT}
+rate_iops={IOPS_LIMIT}
+
+# ---------------------------------------------------------------------
+# check different thread count. (latency, bw) = func(th_count)
+# ---------------------------------------------------------------------
+[vm_count_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 4k, 1m %}
+rw={% randwrite, randread %}
+direct=1
+numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
+
+[vm_count_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 4k, 1m %}
+rw=randwrite
+direct=0
+sync=1
+numjobs={% 1, 2, 5, 10, 15, 20, 25, 30, 35, 40 %}
diff --git a/tests/io_scenario_hdd.cfg b/tests/io_scenario_hdd.cfg
index 3238503..0c36324 100644
--- a/tests/io_scenario_hdd.cfg
+++ b/tests/io_scenario_hdd.cfg
@@ -6,14 +6,14 @@
iodepth=1
filename={FILENAME}
-NUM_ROUNDS={NUM_ROUNDS}
+NUM_ROUNDS=7
ramp_time=5
size=10Gb
runtime=30
# ---------------------------------------------------------------------
-# check different thread count, sync mode. (latency, bw) = func(th_count)
+# check different thread count, sync mode. (latency, iops) = func(th_count)
# ---------------------------------------------------------------------
[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
blocksize={% 4k %}
@@ -22,7 +22,8 @@
numjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}
# ---------------------------------------------------------------------
-# check different thread count, direct read mode. (latency, bw) = func(th_count)
+# check different thread count, direct read mode. (latency, iops) = func(th_count)
+# also check iops for randread
# ---------------------------------------------------------------------
[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
blocksize={% 4k %}
@@ -31,17 +32,20 @@
numjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}
# ---------------------------------------------------------------------
-# check IOPS read/write. (latency, bw) = func(th_count)
+# check different thread count, direct read/write mode. (bw, iops) = func(th_count)
+# also check BW for seq read/write.
+# ---------------------------------------------------------------------
+[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 1m %}
+rw={% read, write %}
+direct=1
+numjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}
+
+# ---------------------------------------------------------------------
+# check IOPS randwrite.
# ---------------------------------------------------------------------
[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
blocksize=4k
-rw={% randwrite, randread %}
+rw=randwrite
direct=1
-# ---------------------------------------------------------------------
-# check BW for seq read/write. (latency, bw) = func(th_count)
-# ---------------------------------------------------------------------
-[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize=1m
-rw={% write, read %}
-direct=1
diff --git a/tests/io_scenario_long_test.cfg b/tests/io_scenario_long_test.cfg
new file mode 100644
index 0000000..b1a40d9
--- /dev/null
+++ b/tests/io_scenario_long_test.cfg
@@ -0,0 +1,20 @@
+[defaults]
+# 24h test
+NUM_ROUNDS=288
+
+buffered=0
+wait_for_previous
+filename={FILENAME}
+iodepth=1
+size=50Gb
+time_based
+runtime=300
+
+# ---------------------------------------------------------------------
+# check read and write linearity. oper_time = func(size)
+# ---------------------------------------------------------------------
+[24h_test * {NUM_ROUNDS}]
+blocksize=128k
+rw=randwrite
+direct=1
+
diff --git a/tests/itest.py b/tests/itest.py
index a048703..3b71d3f 100644
--- a/tests/itest.py
+++ b/tests/itest.py
@@ -5,9 +5,11 @@
from disk_perf_test_tool.tests import disk_test_agent
from disk_perf_test_tool.tests.disk_test_agent import parse_fio_config_full
+from disk_perf_test_tool.tests.disk_test_agent import estimate_cfg, sec_to_str
from disk_perf_test_tool.tests.io_results_loader import parse_output
-from disk_perf_test_tool.ssh_utils import copy_paths
-from disk_perf_test_tool.utils import run_over_ssh, ssize_to_b
+from disk_perf_test_tool.ssh_utils import copy_paths, run_over_ssh
+from disk_perf_test_tool.utils import ssize_to_b
+
logger = logging.getLogger("io-perf-tool")
@@ -96,33 +98,45 @@
self.config_params = test_options.get('params', {})
self.tool = test_options.get('tool', 'fio')
self.raw_cfg = open(self.config_fname).read()
- self.configs = parse_fio_config_full(self.raw_cfg, self.config_params)
+ self.configs = list(parse_fio_config_full(self.raw_cfg,
+ self.config_params))
def pre_run(self, conn):
- # TODO: install fio, if not installed
- cmd = "sudo apt-get -y install fio"
+ try:
+ run_over_ssh(conn, 'which fio')
+ except OSError:
+ # TODO: install fio, if not installed
+ cmd = "sudo apt-get -y install fio"
- for i in range(3):
- try:
- run_over_ssh(conn, cmd)
- break
- except OSError:
- time.sleep(3)
+ for i in range(3):
+ try:
+ run_over_ssh(conn, cmd)
+ break
+ except OSError as err:
+ time.sleep(3)
+ else:
+ raise OSError("Can't install fio - " + err.message)
local_fname = disk_test_agent.__file__.rsplit('.')[0] + ".py"
self.files_to_copy = {local_fname: self.io_py_remote}
copy_paths(conn, self.files_to_copy)
cmd_templ = "dd if=/dev/zero of={0} bs={1} count={2}"
+ files = {}
+
for secname, params in self.configs:
sz = ssize_to_b(params['size'])
msz = msz = sz / (1024 ** 2)
if sz % (1024 ** 2) != 0:
msz += 1
- cmd = cmd_templ.format(params['filename'], 1024 ** 2, msz)
- run_over_ssh(conn, cmd)
+ fname = params['filename']
+ files[fname] = max(files.get(fname, 0), msz)
+
+ for fname, sz in files.items():
+ cmd = cmd_templ.format(fname, 1024 ** 2, msz)
+ run_over_ssh(conn, cmd, timeout=msz)
def run(self, conn, barrier):
cmd_templ = "env python2 {0} --type {1} {2} --json -"
@@ -135,14 +149,22 @@
cmd = cmd_templ.format(self.io_py_remote, self.tool, params)
logger.debug("Waiting on barrier")
+
+ exec_time = estimate_cfg(self.raw_cfg, self.config_params)
+ exec_time_str = sec_to_str(exec_time)
+
try:
- barrier.wait()
- logger.debug("Run {0}".format(cmd))
- out_err = run_over_ssh(conn, cmd, stdin_data=self.raw_cfg)
- self.on_result(out_err, cmd)
+ if barrier.wait():
+ logger.info("Test will takes about {0}".format(exec_time_str))
+
+ out_err = run_over_ssh(conn, cmd,
+ stdin_data=self.raw_cfg,
+ timeout=int(exec_time * 1.1))
finally:
barrier.exit()
+ self.on_result(out_err, cmd)
+
def on_result(self, out_err, cmd):
try:
for data in parse_output(out_err):