Update single-node complete test; add io_py_result_processor.py library
diff --git a/io_scenario/io.py b/io_scenario/io.py
index 2ad8f19..d698546 100644
--- a/io_scenario/io.py
+++ b/io_scenario/io.py
@@ -8,7 +8,6 @@
import os.path
import argparse
import warnings
-import threading
import subprocess
@@ -318,7 +317,6 @@
"--filename=%s" % tmpname,
"--size={0}k".format(benchmark.size),
"--numjobs={0}".format(benchmark.concurence),
- "--runtime=60",
"--output-format=json",
"--sync=" + ('1' if benchmark.sync else '0')]
diff --git a/scripts/__init__.py b/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/__init__.py
diff --git a/scripts/io_py_result_processor.py b/scripts/io_py_result_processor.py
new file mode 100644
index 0000000..9166b0a
--- /dev/null
+++ b/scripts/io_py_result_processor.py
@@ -0,0 +1,206 @@
+import sys
+import math
+import itertools
+
+from colorama import Fore, Style
+
+
+def med_dev(vals):
+    """Return (center, spread) of *vals*, both truncated to int.
+
+    NOTE(review): despite the name this computes the arithmetic mean,
+    not the median, paired with the population standard deviation around
+    that mean.  Under Python 2 (implied by the print statements later in
+    this module) `sum(vals) / len(vals)` floor-divides for all-int input.
+    """
+    med = sum(vals) / len(vals)
+    dev = ((sum(abs(med - i) ** 2 for i in vals) / len(vals)) ** 0.5)
+    return int(med), int(dev)
+
+
+def round_deviation(med_dev):
+    """Round a (value, deviation) pair to two significant digits of the
+    deviation, preserving each element's original type.
+
+    A near-zero deviation (< 1e-7) is returned untouched, since its
+    magnitude cannot be used to choose a rounding scale.
+    """
+    value, deviation = med_dev
+
+    if deviation < 1E-7:
+        return med_dev
+
+    # Scale chosen so the deviation keeps exactly two significant digits.
+    scale = 10.0 ** (math.floor(math.log10(deviation)) - 1)
+    rounded_dev = int(deviation / scale) * scale
+    rounded_val = int(value / scale) * scale
+    return (type(value)(rounded_val),
+            type(deviation)(rounded_dev))
+
+
+def groupby_globally(data, key_func):
+    """Group *data* into {key: [items]} using key_func(item) as the key.
+
+    Unlike bare itertools.groupby(), which only merges *adjacent* runs of
+    equal keys, entries are accumulated globally, so unsorted input is
+    grouped correctly.  Generalized to accept any hashable key: the
+    original unpacked each key into (bs, cache_tp, act, conc) and then
+    rebuilt the identical tuple, needlessly restricting keys to 4-tuples.
+    """
+    grouped = {}
+    for key, items in itertools.groupby(data, key_func):
+        grouped.setdefault(key, []).extend(items)
+    return grouped
+
+
+class Data(object):
+    """Named collection of measurement series.
+
+    ``series`` maps a grouping key to a list of raw numeric values;
+    ``processed_series`` maps the same keys to rounded (mean, deviation)
+    pairs (populated by process_inplace()).
+    """
+    def __init__(self, name):
+        self.name = name
+        self.series = {}
+        self.processed_series = {}
+
+
+def process_inplace(data):
+    """Populate data.processed_series with a rounded (mean, deviation)
+    pair derived from each raw value list in data.series."""
+    data.processed_series = dict(
+        (key, round_deviation(med_dev(raw_vals)))
+        for key, raw_vals in data.series.items())
+
+
+def diff_table(*datas):
+    """Compare data sets against the first one (the baseline).
+
+    Returns ([name, ...], {key: row}) where each row starts with the
+    baseline's (value, deviation) pair, followed by one
+    (max_diff_pct, min_diff_pct) pair per remaining data set.  The
+    percentages bound the relative change of the value interval
+    [value - dev, value + dev] against the baseline interval, both
+    normalised by the baseline's upper bound.
+    """
+    table = {}
+    baseline_series = datas[0].processed_series
+
+    for key, (base_val, base_dev) in baseline_series.items():
+        base_hi = base_val + base_dev
+        base_lo = base_val - base_dev
+
+        row = [(base_val, base_dev)]
+
+        for other in datas[1:]:
+            val, dev = other.processed_series[key]
+
+            # Pessimistic and optimistic percent change vs. the baseline.
+            low_pct = int(float((val - dev) - base_hi) / base_hi * 100)
+            high_pct = int(float((val + dev) - base_lo) / base_hi * 100)
+
+            row.append((max(low_pct, high_pct), min(low_pct, high_pct)))
+
+        table[key] = row
+
+    return [d.name for d in datas], table
+
+
+def print_table(headers, table):
+    """Render diff_table() output as an ASCII table on stdout.
+
+    Every cell travels as a (visible_length, text) pair: cells may embed
+    colorama escape sequences, which occupy characters but no screen
+    width, so padding must be computed from the uncolored text.
+    """
+    lines = []
+    items = sorted(table.items())
+    # Header row: empty key column plus one cell per data-set name.
+    lines.append([(len(i), i) for i in [""] + headers])
+    item_frmt = "{0}{1:>4}{2} ~ {3}{4:>4}{5}"
+
+    for key, vals in items:
+        # Key column (4 fields, produced by key_func) and the baseline
+        # "value ~ deviation" column.
+        ln1 = "{0:>4} {1} {2:>9} {3}".format(*map(str, key))
+        ln2 = "{0:>4} ~ {1:>3}".format(*vals[0])
+
+        line = [(len(ln1), ln1), (len(ln2), ln2)]
+
+        for idx, val in enumerate(vals[1:], 2):
+            # Colour each diff percentage: red below -10, green above +10,
+            # uncolored otherwise.  cval becomes [pre, num, post] * 2.
+            cval = []
+            for vl in val:
+                if vl < -10:
+                    cval.extend([Fore.RED, vl, Style.RESET_ALL])
+                elif vl > 10:
+                    cval.extend([Fore.GREEN, vl, Style.RESET_ALL])
+                else:
+                    cval.extend(["", vl, ""])
+
+            # Visible width: format again with the colour slots blanked.
+            ln = len(item_frmt.format("", cval[1], "", "", cval[4], ""))
+            line.append((ln, item_frmt.format(*cval)))
+
+        lines.append(line)
+
+    # Widest visible cell per column drives the padding.
+    max_columns_with = []
+    for idx in range(len(lines[0])):
+        max_columns_with.append(
+            max(line[idx][0] for line in lines))
+
+    sep = '-' * (4 + sum(max_columns_with) + 3 * (len(lines[0]) - 1))
+
+    print sep
+    for idx, line in enumerate(lines):
+        cline = []
+        for (curr_len, txt), exp_ln in zip(line, max_columns_with):
+            # Left-pad with the difference between padded and visible width.
+            cline.append(" " * (exp_ln - curr_len) + txt)
+        print "| " + " | ".join(cline) + " |"
+        if 0 == idx:
+            print sep
+    print sep
+
+
+def key_func(x):
+    """Grouping key for one io.py result record:
+    (blocksize, 'd' if direct I/O else 's', action, concurrency)."""
+    meta = x['__meta__']
+    io_mode = 'd' if meta['direct_io'] else 's'
+    return (meta['blocksize'], io_mode, meta['action'], meta['concurence'])
+
+
+# Row template and column headers for a per-data-set tabular report.
+# Not used by the active diff-table output path in this module.
+template = "{bs:>4} {action:>12} {cache_tp:>3} {conc:>4}"
+template += " | {iops[0]:>6} ~ {iops[1]:>5} | {bw[0]:>7} ~ {bw[1]:>6}"
+template += " | {lat[0]:>6} ~ {lat[1]:>5} |"
+
+headers = dict(bs="BS",
+               action="operation",
+               cache_tp="S/D",
+               conc="CONC",
+               iops=("IOPS", "dev"),
+               bw=("BW kBps", "dev"),
+               lat=("LAT ms", "dev"))
+
+
+def load_io_py_file(fname):
+    """Yield result records parsed from an io.py output file.
+
+    A record starts on a line beginning with "{'__meta__':" and may span
+    several lines; it is treated as complete once its braces balance.
+
+    SECURITY NOTE(review): records are parsed with eval(), so the file is
+    executed as Python -- only feed files produced by trusted runs.
+    ast.literal_eval would be safer if records are plain literals; verify
+    before switching.  Brace counting also counts braces inside string
+    values, so such a record could be mis-detected.
+    """
+    with open(fname) as fc:
+        block = None
+        for line in fc:
+            if line.startswith("{'__meta__':"):
+                # NOTE(review): an unfinished previous block is silently
+                # discarded here.
+                block = line
+            elif block is not None:
+                block += line
+
+            if block is not None and block.count('}') == block.count('{'):
+                yield eval(block)
+                block = None
+
+        # NOTE(review): effectively unreachable -- the in-loop check above
+        # already ran after the final line; kept as a harmless safety net.
+        if block is not None and block.count('}') == block.count('{'):
+            yield eval(block)
+
+
+def main(argv):
+    """Entry point: argv[1:] are "header=filename" pairs.
+
+    Each file is loaded via load_io_py_file(), records are grouped by
+    key_func() (blocksize, sync/direct, action, concurrency), per-group
+    total IOPS (per-job iops * concurrency) are reduced to rounded
+    (mean, deviation) pairs, and the first data set becomes the baseline
+    of the comparison table printed to stdout.
+
+    (Large commented-out draft of a per-data-set report was removed; the
+    module-level `template`/`headers` constants it used remain.)
+    """
+    CONC_POS = 3  # index of concurrency inside the key_func() tuple
+    items = []
+    for hdr_fname in argv[1:]:
+        hdr, fname = hdr_fname.split("=", 1)
+        data = list(load_io_py_file(fname))
+        item = Data(hdr)
+        for key, vals in groupby_globally(data, key_func).items():
+            # Scale per-job IOPS by the run's concurrency to get totals.
+            item.series[key] = [val['iops'] * key[CONC_POS] for val in vals]
+        process_inplace(item)
+        items.append(item)
+
+    print_table(*diff_table(*items))
+
+
+if __name__ == "__main__":
+    # sys.exit rather than the site-provided exit() builtin: sys.exit is
+    # always available (even under python -S).  Dead scratch comments
+    # (including a syntactically broken "for val in :") were removed.
+    sys.exit(main(sys.argv))
diff --git a/scripts/run_test.sh b/scripts/run_test.sh
index 94612ba..fefc5d7 100644
--- a/scripts/run_test.sh
+++ b/scripts/run_test.sh
@@ -34,7 +34,7 @@
# note: the function works properly only when the image name is a single string without spaces that could break awk
function wait_image_active() {
image_state="none"
- image_name=$IMAGE_NAME
+ image_name="$IMAGE_NAME"
counter=0
while [ ! $image_state eq "active" ] ; do
@@ -43,7 +43,7 @@
echo $image_state
counter=$((counter + 1))
- if [ $counter -eq $TIMEOUT ]
+ if [ "$counter" -eq "$TIMEOUT" ]
then
echo "Time limit exceed"
break
diff --git a/scripts/single_node_test_complete.sh b/scripts/single_node_test_complete.sh
index c5982cb..269e127 100644
--- a/scripts/single_node_test_complete.sh
+++ b/scripts/single_node_test_complete.sh
@@ -20,14 +20,21 @@
}
function run_tests(){
- OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G"
+
+ super_sync ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240
+
+ OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G --timeout 15"
+ for cycle in $(seq 50) ; do
+ super_sync ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency 1
+ done
+
+ echo "--------------------------------------------------------------------------------"
+
+ OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G --timeout 30"
OPERS="read write randread randwrite"
CONCS="1 4 8 64"
SIZES="4k 16k 64k 256k 1m 2m"
- # num cycles = 6 * 4 * 7 * 4 + 7 * 4 * 4 == 784 == 13 hours
-
- super_sync ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240
for cycle in $(seq $NUM_CYCLES) ; do
for conc in $CONCS ; do