Pre-release updates and bug fixes

- tests/disk_test_agent.py: apply ramp_time only to the first repeat of a job
  section (restoring it afterwards), move estimate_iops() into
  do_run_fio_fake(), store timings as integer [runtime, ramp_time]
- add tests/io_scenario_check_distribution.cfg and tests/io_scenario_hdd.cfg
- tests/io_task_test.cfg: 7 rounds, 15s ramp_time, 30s runtime, 1000Mb size,
  parametrized filename, new randread job
- tests/itest.py: drop exit-code handling around run_over_ssh(), retry the fio
  install via sudo apt-get, pass --params to the remote agent, add
  merge_results() to combine per-round fio results
diff --git a/tests/disk_test_agent.py b/tests/disk_test_agent.py
index 8a0d165..5de3038 100644
--- a/tests/disk_test_agent.py
+++ b/tests/disk_test_agent.py
@@ -89,8 +89,15 @@
assert 'group_reporting' in processed_vals,\
group_report_err_msg
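+ # keep ramp_time only in the first copy of a repeated section; it is put
+ # back afterwards so processed_vals ends up unchanged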
+ ramp_time = processed_vals.get('ramp_time')
+
for i in range(repeat):
yield name.format(**params), processed_vals.copy()
+ if 'ramp_time' in processed_vals:
+ del processed_vals['ramp_time']
+
+ if ramp_time is not None:
+ processed_vals['ramp_time'] = ramp_time
def calculate_execution_time(combinations):
@@ -203,11 +210,9 @@
raise
-def estimate_iops(sz, bw, lat):
- return 1 / (lat + float(sz) / bw)
-
-
def do_run_fio_fake(bconf):
+ def estimate_iops(sz, bw, lat):
+ return 1 / (lat + float(sz) / bw)
global count
count += 1
parsed_out = []
@@ -386,8 +391,8 @@
j_res["concurence"] = int(jconfig.get("numjobs", 1))
j_res["blocksize"] = jconfig["blocksize"]
j_res["jobname"] = job_output["jobname"]
- j_res["timings"] = (jconfig.get("runtime"),
- jconfig.get("ramp_time"))
+ j_res["timings"] = [int(jconfig.get("runtime", 0)),
+ int(jconfig.get("ramp_time", 0))]
else:
j_res = res[jname]
assert j_res["action"] == jconfig["rw"]
diff --git a/tests/io_scenario_check_distribution.cfg b/tests/io_scenario_check_distribution.cfg
new file mode 100644
index 0000000..6ba3f9f
--- /dev/null
+++ b/tests/io_scenario_check_distribution.cfg
@@ -0,0 +1,13 @@
+[distribution_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize=4k
+rw=randwrite
+direct=1
+ramp_time=5
+buffered=0
+wait_for_previous
+filename={FILENAME}
+iodepth=1
+size=10Gb
+time_based
+runtime=30
+group_reporting
diff --git a/tests/io_scenario_hdd.cfg b/tests/io_scenario_hdd.cfg
new file mode 100644
index 0000000..3238503
--- /dev/null
+++ b/tests/io_scenario_hdd.cfg
@@ -0,0 +1,47 @@
+[defaults]
+wait_for_previous
+group_reporting
+time_based
+buffered=0
+iodepth=1
+
+filename={FILENAME}
+NUM_ROUNDS={NUM_ROUNDS}
+
+ramp_time=5
+size=10Gb
+runtime=30
+
+# ---------------------------------------------------------------------
+# check different thread counts, sync random write mode. (latency, bw) = func(th_count)
+# ---------------------------------------------------------------------
+[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 4k %}
+rw=randwrite
+sync=1
+numjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}
+
+# ---------------------------------------------------------------------
+# check different thread counts, direct random read mode. (latency, bw) = func(th_count)
+# ---------------------------------------------------------------------
+[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize={% 4k %}
+rw=randread
+direct=1
+numjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}
+
+# ---------------------------------------------------------------------
+# check IOPS for direct random read/write, 4k blocks
+# ---------------------------------------------------------------------
+[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize=4k
+rw={% randwrite, randread %}
+direct=1
+
+# ---------------------------------------------------------------------
+# check BW for direct sequential read/write, 1m blocks
+# ---------------------------------------------------------------------
+[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize=1m
+rw={% write, read %}
+direct=1
diff --git a/tests/io_task_test.cfg b/tests/io_task_test.cfg
index 4d78578..24d62a9 100644
--- a/tests/io_task_test.cfg
+++ b/tests/io_task_test.cfg
@@ -1,14 +1,29 @@
-[writetest * 3]
+[writetest * 7]
group_reporting
numjobs=4
wait_for_previous
-ramp_time=5
+ramp_time=15
blocksize=4k
-filename=/tmp/xxx.bin
+filename={FILENAME}
rw=randwrite
direct=1
buffered=0
iodepth=1
-size=100Mb
-runtime=10
+size=1000Mb
+runtime=30
+time_based
+
+[readtest * 7]
+group_reporting
+numjobs=4
+wait_for_previous
+ramp_time=15
+blocksize=4k
+filename={FILENAME}
+rw=randread
+direct=1
+buffered=0
+iodepth=1
+size=1000Mb
+runtime=30
time_based
diff --git a/tests/itest.py b/tests/itest.py
index 53c4af3..a048703 100644
--- a/tests/itest.py
+++ b/tests/itest.py
@@ -1,10 +1,10 @@
-import re
import abc
-import json
+import time
import os.path
import logging
from disk_perf_test_tool.tests import disk_test_agent
+from disk_perf_test_tool.tests.disk_test_agent import parse_fio_config_full
from disk_perf_test_tool.tests.io_results_loader import parse_output
from disk_perf_test_tool.ssh_utils import copy_paths
from disk_perf_test_tool.utils import run_over_ssh, ssize_to_b
@@ -51,17 +51,15 @@
def pre_run(self, conn):
remote_script = self.copy_script(conn, self.pre_run_script)
cmd = remote_script
- code, out_err = run_over_ssh(conn, cmd)
- if code != 0:
- raise Exception("Pre run failed. %s" % out_err)
+ run_over_ssh(conn, cmd)
def run(self, conn, barrier):
remote_script = self.copy_script(conn, self.run_script)
cmd_opts = ' '.join(["%s %s" % (key, val) for key, val
in self.opts.items()])
cmd = remote_script + ' ' + cmd_opts
- code, out_err = run_over_ssh(conn, cmd)
- self.on_result(code, out_err, cmd)
+ out_err = run_over_ssh(conn, cmd)
+ self.on_result(out_err, cmd)
def parse_results(self, out):
for line in out.split("\n"):
@@ -69,16 +67,12 @@
if key and value:
self.on_result_cb((key, float(value)))
- def on_result(self, code, out_err, cmd):
- if 0 == code:
- try:
- self.parse_results(out_err)
- except Exception as exc:
- msg_templ = "Error during postprocessing results: {0!r}"
- raise RuntimeError(msg_templ.format(exc.message))
- else:
- templ = "Command {0!r} failed with code {1}. Error output is:\n{2}"
- logger.error(templ.format(cmd, code, out_err))
+ def on_result(self, out_err, cmd):
+ try:
+ self.parse_results(out_err)
+ except Exception as exc:
+ msg_templ = "Error during postprocessing results: {0!r}. {1}"
+ raise RuntimeError(msg_templ.format(exc.message, out_err))
class PgBenchTest(TwoScriptTest):
@@ -102,14 +96,19 @@
self.config_params = test_options.get('params', {})
self.tool = test_options.get('tool', 'fio')
self.raw_cfg = open(self.config_fname).read()
-
- parse_func = disk_test_agent.parse_fio_config_full
- self.configs = parse_func(self.raw_cfg, self.config_params)
+ self.configs = parse_fio_config_full(self.raw_cfg, self.config_params)
def pre_run(self, conn):
# TODO: install fio, if not installed
- run_over_ssh(conn, "apt-get -y install fio")
+ cmd = "sudo apt-get -y install fio"
+
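+ # run_over_ssh raises OSError on failure; retry the install a few times
+ # with a short pause to survive transient errors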
+ for i in range(3):
+ try:
+ run_over_ssh(conn, cmd)
+ break
+ except OSError:
+ time.sleep(3)
local_fname = disk_test_agent.__file__.rsplit('.')[0] + ".py"
self.files_to_copy = {local_fname: self.io_py_remote}
@@ -123,30 +122,64 @@
msz += 1
cmd = cmd_templ.format(params['filename'], 1024 ** 2, msz)
- code, out_err = run_over_ssh(conn, cmd)
-
- if code != 0:
- raise RuntimeError("Preparation failed " + out_err)
+ run_over_ssh(conn, cmd)
def run(self, conn, barrier):
- cmd_templ = "env python2 {0} --type {1} --json -"
- cmd = cmd_templ.format(self.io_py_remote, self.tool)
- logger.debug("Run {0}".format(cmd))
+ cmd_templ = "env python2 {0} --type {1} {2} --json -"
+
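+ # pass the config template parameters (e.g. FILENAME) to the remote agent via --params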
+ params = " ".join("{0}={1}".format(k, v)
+ for k, v in self.config_params.items())
+
+ if "" != params:
+ params = "--params " + params
+
+ cmd = cmd_templ.format(self.io_py_remote, self.tool, params)
+ logger.debug("Waiting on barrier")
try:
barrier.wait()
- code, out_err = run_over_ssh(conn, cmd, stdin_data=self.raw_cfg)
- self.on_result(code, out_err, cmd)
+ logger.debug("Run {0}".format(cmd))
+ out_err = run_over_ssh(conn, cmd, stdin_data=self.raw_cfg)
+ self.on_result(out_err, cmd)
finally:
barrier.exit()
- def on_result(self, code, out_err, cmd):
- if 0 == code:
- try:
- for data in parse_output(out_err):
- self.on_result_cb(data)
- except Exception as exc:
- msg_templ = "Error during postprocessing results: {0!r}"
- raise RuntimeError(msg_templ.format(exc.message))
- else:
- templ = "Command {0!r} failed with code {1}. Output is:\n{2}"
- logger.error(templ.format(cmd, code, out_err))
+ def on_result(self, out_err, cmd):
+ try:
+ for data in parse_output(out_err):
+ self.on_result_cb(data)
+ except Exception as exc:
+ msg_templ = "Error during postprocessing results: {0!r}"
+ raise RuntimeError(msg_templ.format(exc.message))
+
+ def merge_results(self, results):
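+ # merge fio results from several rounds of the same config: the __meta__
+ # blocks must match, list-valued metrics (bw_mean, clat, iops, lat, slat)
+ # are concatenated, and every other per-test field must be identical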
+ merged_result = results[0]
+ merged_data = merged_result['res']
+ expected_keys = set(merged_data.keys())
+ mergable_fields = ['bw_mean', 'clat', 'iops', 'lat', 'slat']
+
+ for res in results[1:]:
+ assert res['__meta__'] == merged_result['__meta__']
+
+ data = res['res']
+ diff = set(data.keys()).symmetric_difference(expected_keys)
+
+ msg = "Difference: {0}".format(",".join(diff))
+ assert len(diff) == 0, msg
+
+ for testname, test_data in data.items():
+ res_test_data = merged_data[testname]
+
+ diff = set(test_data.keys()).symmetric_difference(
+ res_test_data.keys())
+
+ msg = "Difference: {0}".format(",".join(diff))
+ assert len(diff) == 0, msg
+
+ for k, v in test_data.items():
+ if k in mergable_fields:
+ res_test_data[k].extend(v)
+ else:
+ msg = "{0!r} != {1!r}".format(res_test_data[k], v)
+ assert res_test_data[k] == v, msg
+
+ return merged_result