single-node OS tests added
diff --git a/scripts/data2.py b/scripts/data2.py
new file mode 100644
index 0000000..08dbc77
--- /dev/null
+++ b/scripts/data2.py
@@ -0,0 +1,111 @@
+import ast
+import sys
+import math
+import itertools
+
+
+def key(x):
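+    # group key: (block size, direct/sync flag, operation, concurrency)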
+    return (x['__meta__']['blocksize'],
+            'd' if x['__meta__']['direct_io'] else 's',
+            x['__meta__']['action'],
+            x['__meta__']['concurence'])
+
+
+def med_dev(vals):
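+    # returns (mean, population standard deviation) of vals, both truncated to int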
+    med = sum(vals) / len(vals)
+    dev = ((sum(abs(med - i) ** 2 for i in vals) / len(vals)) ** 0.5)
+    return int(med), int(dev)
+
+
+def round_deviation(med_dev):
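+    # keep two significant digits of dev and round med to the same precision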
+    med, dev = med_dev
+
+    if dev < 1E-7:
+        return med_dev
+
+    dev_div = 10.0 ** (math.floor(math.log10(dev)) - 1)
+    dev = int(dev / dev_div) * dev_div
+    med = int(med / dev_div) * dev_div
+    return (type(med_dev[0])(med),
+            type(med_dev[1])(dev))
+
+
+def groupby_globally(data, key_func):
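+    # itertools.groupby only groups consecutive items, so merge groups into a dict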
+    grouped = {}
+    grouped_iter = itertools.groupby(data, key_func)
+
+    for group_key, curr_data_it in grouped_iter:
+        grouped.setdefault(group_key, []).extend(curr_data_it)
+
+    return grouped
+
+
+template = "{bs:>4}  {action:>12}  {cache_tp:>3}  {conc:>4}"
+template += " | {iops[0]:>6} ~ {iops[1]:>5} | {bw[0]:>7} ~ {bw[1]:>6}"
+template += " | {lat[0]:>6} ~ {lat[1]:>5} |"
+
+headers = dict(bs="BS",
+               action="operation",
+               cache_tp="S/D",
+               conc="CONC",
+               iops=("IOPS", "dev"),
+               bw=("BW kBps", "dev"),
+               lat=("LAT ms", "dev"))
+
+
+def main(argv):
+    data = []
+
+    with open(argv[1]) as fc:
+        block = None
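+        # accumulate a multi-line dict literal; it is complete when braces balance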
+        for line in fc:
+            if line.startswith("{'__meta__':"):
+                block = line
+            elif block is not None:
+                block += line
+
+            if block is not None:
+                if block.count('}') == block.count('{'):
+                    # blocks are plain dict literals, so parse them safely
+                    data.append(ast.literal_eval(block))
+                    block = None
+
+    grouped = groupby_globally(data, key)
+
+    print template.format(**headers)
+
+    for (bs, cache_tp, act, conc), curr_data in sorted(grouped.items()):
+        iops = med_dev([i['iops'] * int(conc) for i in curr_data])
+        bw_mean = med_dev([i['bw_mean'] * int(conc) for i in curr_data])
+        lat = med_dev([i['lat'] / 1000 for i in curr_data])
+
+        iops = round_deviation(iops)
+        bw_mean = round_deviation(bw_mean)
+        lat = round_deviation(lat)
+
+        params = dict(
+            bs=bs,
+            action=act,
+            cache_tp=cache_tp,
+            iops=iops,
+            bw=bw_mean,
+            lat=lat,
+            conc=conc
+        )
+
+        print template.format(**params)
+
+
+if __name__ == "__main__":
+    exit(main(sys.argv))
+
+    # vals = [(123, 23), (125678, 5678), (123.546756, 23.77),
+    #         (123.546756, 102.77), (0.1234, 0.0224),
+    #         (0.001234, 0.000224), (0.001234, 0.0000224)]
+    # for val in vals:
+    #     print val, "=>", round_deviation(val)
diff --git a/scripts/gen_load.sh b/scripts/gen_load.sh
new file mode 100755
index 0000000..e3af3cd
--- /dev/null
+++ b/scripts/gen_load.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
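+# Generate io.py invocations for the single-node test matrix (see generate_load.py).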
+TESTER="--tester-type fio"
+CACHE="--cache-modes d"
+REPEATS="--repeats 3"
+
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x1000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x2000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x4000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x8000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x16000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x32000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x64000
+# python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x128000
+
+python generate_load.py $TESTER --sizes 4k --opers randwrite $CACHE --concurrences 1 4 8 $REPEATS --io-size 10G
+python generate_load.py $TESTER --sizes 4k --opers randread $CACHE --concurrences 1 4 8 $REPEATS --io-size 10G
+
+python generate_load.py $TESTER --sizes 4k --opers randwrite --cache-modes s --concurrences 1 $REPEATS --io-size 10G
+python generate_load.py $TESTER --sizes 4k --opers randread randwrite $CACHE --concurrences 1 $REPEATS --io-size 10G
+python generate_load.py $TESTER --sizes 2m --opers read write $CACHE --concurrences 1 $REPEATS --io-size 10G
diff --git a/scripts/generate_load.py b/scripts/generate_load.py
index f99c816..0d25038 100644
--- a/scripts/generate_load.py
+++ b/scripts/generate_load.py
@@ -1,14 +1,7 @@
-# BLOCK_SIZES = "1k 4k 64k 256k 1m"
-# OPERATIONS="randwrite write randread read"
-# SYNC_TYPES="s a d"
-# REPEAT_COUNT="3"
-# CONCURRENCES="1 8 64"
+import sys
+import argparse
 
-from utils import ssize_to_kb
-
-SYNC_FACTOR = "x500"
-DIRECT_FACTOR = "x10000"
-ASYNC_FACTOR = "r2"
+from disk_perf_test_tool.utils import ssize_to_b
 
 
 def make_list(x):
@@ -16,76 +9,100 @@
         return [x]
     return x
 
-HDD_SIZE_KB = 45 * 1000 * 1000
 
-
-def max_file():
-    pass
-
-
-def make_load(sizes, opers, sync_types, concurrence,
-              tester_type='iozone', repeat_count=3):
+def make_load(settings):
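+    # yield io.py option strings for every (concurrency, size, oper, cache-mode) combo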
 
     iodepth = 1
-    for conc in make_list(concurrence):
-        for bsize in make_list(sizes):
-            for oper in make_list(opers):
-                for sync_type in make_list(sync_types):
+    for conc in make_list(settings.concurrences):
+        for bsize in make_list(settings.sizes):
+            for oper in make_list(settings.opers):
+                for cache_mode in make_list(settings.cache_modes):
 
                     # filter out too slow options
-                    if bsize in "1k 4k" and sync_type == "a":
+                    if bsize in "1k 4k" and cache_mode == "a":
                         continue
 
                     # filter out sync reads
-                    if oper in "read randread" and sync_type == "s":
+                    if oper in "read randread" and cache_mode == "s":
                         continue
 
-                    if sync_type == "s":
-                        size_sync_opts = "--iosize {0} -s".format(SYNC_FACTOR)
-                    elif sync_type == "d":
-                        if oper == 'randread':
-                            assert SYNC_FACTOR[0] == 'x'
-                            max_f = int(SYNC_FACTOR[1:])
-                        else:
-                            max_f = None
-
-                        mmax_f = HDD_SIZE_KB / (int(conc) * ssize_to_kb(bsize))
-
-                        if max_f is None or mmax_f > max_f:
-                            max_f = mmax_f
-
-                        assert DIRECT_FACTOR[0] == 'x'
-                        if max_f > int(DIRECT_FACTOR[1:]):
-                            max_f = DIRECT_FACTOR
-                        else:
-                            max_f = "x{0}".format(max_f)
-
-                        size_sync_opts = "--iosize {0} -d".format(max_f)
-
+                    if settings.io_size is not None:
+                        size_sync_opts = " --iosize " + str(settings.io_size)
+                        if cache_mode == "s":
+                            size_sync_opts += " -s"
+                        elif cache_mode == "d":
+                            size_sync_opts += " -d"
                     else:
-                        if oper == 'randread' or oper == 'read':
-                            size_sync_opts = "--iosize " + str(SYNC_FACTOR)
+                        if cache_mode == "s":
+                            size_sync_opts = "--iosize {0} -s".format(
+                                settings.sync_default_size)
+                        elif cache_mode == "d":
+                            if oper == 'randread':
+                                assert settings.sync_default_size[0] == 'x'
+                                max_f = int(settings.sync_default_size[1:])
+                            else:
+                                max_f = None
+
+                            mmax_f = ssize_to_b(settings.hdd_size) / \
+                                (int(conc) * ssize_to_b(bsize))
+
+                            if max_f is None or mmax_f > max_f:
+                                max_f = mmax_f
+
+                            assert settings.direct_default_size[0] == 'x'
+                            if max_f > int(settings.direct_default_size[1:]):
+                                max_f = settings.direct_default_size
+                            else:
+                                max_f = "x{0}".format(max_f)
+
+                            size_sync_opts = "--iosize {0} -d".format(max_f)
+
                         else:
-                            size_sync_opts = "--iosize " + str(ASYNC_FACTOR)
+                            # reads fall back to the sync default size, writes
+                            # to the async one (old SYNC_FACTOR/ASYNC_FACTOR)
+                            if oper == 'randread' or oper == 'read':
+                                size_sync_opts = "--iosize " + \
+                                    str(settings.sync_default_size)
+                            else:
+                                size_sync_opts = "--iosize " + \
+                                    str(settings.async_default_size)
 
                     # size_sync_opts = get_file_size_opts(sync_type)
 
-                    io_opts = "--type {0} ".format(tester_type)
+                    io_opts = "--type {0} ".format(settings.tester_type)
                     io_opts += "-a {0} ".format(oper)
                     io_opts += "--iodepth {0} ".format(iodepth)
                     io_opts += "--blocksize {0} ".format(bsize)
                     io_opts += size_sync_opts + " "
                     io_opts += "--concurrency {0}".format(conc)
 
-                    for i in range(repeat_count):
+                    for i in range(settings.repeats):
                         yield io_opts
 
 
-sizes = "4k 64k 2m".split()
-opers = "randwrite write randread read".split()
-sync_types = "s a d".split()
-concurrence = "1 8 64".split()
+def parse_opts(args):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--sizes', nargs="+", required=True)
+    parser.add_argument('--opers', nargs="+", required=True)
+    parser.add_argument('--cache-modes', nargs="+", required=True)
+    parser.add_argument('--concurrences', nargs="+", required=True)
+    parser.add_argument('--repeats', type=int, default=3)
+    parser.add_argument("--hdd-size", default="45G")
+    parser.add_argument("--tester-type", default="iozone")
+    parser.add_argument("--io-size", default=None)
 
-for io_opts in make_load(sizes=sizes, concurrence=concurrence,
-                         sync_types=sync_types, opers=opers):
-    print io_opts
+    parser.add_argument("--direct-default-size", default="x1000")
+    parser.add_argument("--sync-default-size", default="x1000")
+    parser.add_argument("--async-default-size", default="r2")
+
+    return parser.parse_args(args[1:])
+
+
+def main(args):
+    opts = parse_opts(args)
+    for io_opts in make_load(opts):
+        print "python io.py --test-file /opt/xxx.bin " + io_opts
+
+if __name__ == "__main__":
+    exit(main(sys.argv))
diff --git a/scripts/prepare.sh b/scripts/prepare.sh
index 3d85072..7151b2f 100644
--- a/scripts/prepare.sh
+++ b/scripts/prepare.sh
@@ -6,7 +6,7 @@
 
 # settings
 FL_RAM=256
-FL_HDD=50
+FL_HDD=20
 FL_CPU=1
 
 
@@ -100,6 +100,7 @@
     if [ -z "$keypair_id" ] ; then
         echo "Creating server group $SERV_GROUP. Key would be stored into $KEY_FILE_NAME"
         nova keypair-add "$KEYPAIR_NAME" > "$KEY_FILE_NAME"
+        chmod og= "$KEY_FILE_NAME"  # ssh refuses private keys readable by others
     fi
 
     echo "Adding rules for ping and ssh"
diff --git a/scripts/run_vm.sh b/scripts/run_vm.sh
new file mode 100644
index 0000000..ddc5cfc
--- /dev/null
+++ b/scripts/run_vm.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+MASTER_IP=$1
+FUEL_PASSWD=$2
+
+OS_ORIGIN_IP=192.168.0.2
+OS_EXT_IP=172.16.53.2
+VM_NAME=koder-disk-test
+
+FIXED_NET_NAME="net04"
+FLOATING_NET="net04_ext"
+
+my_dir="$(dirname "$0")"
+source "$my_dir/config.sh"
+SSH_OVER_MASTER="sshpass -p${FUEL_PASSWD} ssh root@${MASTER_IP}"
+VOLUME_NAME="test-volume"
+VOLUME_SIZE=20
+VOLUME_DEVICE="/dev/vdb"
+
+# VM_IP=$(nova floating-ip-create "$FLOATING_NET" | grep "$FLOATING_NET" | awk '{print $2}')
+VM_IP=172.16.53.18
+
+function get_openrc() {
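+	# fetch openrc from a controller via the Fuel master, rewriting the internal IP to the external one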
+	OPENRC=`tempfile`
+	CONTROLLER_NODE=$($SSH_OVER_MASTER fuel node | grep controller | awk '-F|' '{gsub(" ", "", $5); print $5}')
+	$SSH_OVER_MASTER ssh $CONTROLLER_NODE cat openrc 2>/dev/null | sed "s/$OS_ORIGIN_IP/$OS_EXT_IP/g" > $OPENRC
+	echo $OPENRC
+}
+
+function boot_vm() {
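+	# boot the test VM, attach the floating IP and a fresh cinder volume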
+	FIXED_NET_ID=$(nova net-list | grep "\b${FIXED_NET_NAME}\b" | awk '{print $2}')
+	nova boot --flavor "$FLAVOR_NAME" --image "$IMAGE_NAME" --key-name "$KEYPAIR_NAME" --security-groups default --nic net-id=$FIXED_NET_ID $VM_NAME
+	nova floating-ip-associate $VM_NAME $VM_IP
+	VOL_ID=$(cinder create --display-name $VOLUME_NAME $VOLUME_SIZE | grep '\bid\b' | awk '{print $4}')
+	nova volume-attach $VM_NAME $VOL_ID $VOLUME_DEVICE
+}
+
+function prepare_vm() {
+	scp -i "$KEY_FILE_NAME" -r ../io_scenario ubuntu@${VM_IP}:/tmp
+	scp -i "$KEY_FILE_NAME" $DEBS ubuntu@${VM_IP}:/tmp
+	scp -i "$KEY_FILE_NAME" single_node_test_short.sh ubuntu@${VM_IP}:/tmp
+	ssh -i "$KEY_FILE_NAME" ubuntu@${VM_IP} sudo dpkg -i $DEBS
+}
+
+function prepare_node() {
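+	# stage io_scenario, the .debs and the test script on a compute node via the master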
+	COMPUTE_NODE=$($SSH_OVER_MASTER fuel node | grep compute | awk '-F|' '{gsub(" ", "", $5); print $5}')
+
+	sshpass -p${FUEL_PASSWD} scp -r ../io_scenario root@${MASTER_IP}:/tmp
+	$SSH_OVER_MASTER scp -r /tmp/io_scenario $COMPUTE_NODE:/tmp
+
+	sshpass -p${FUEL_PASSWD} scp $DEBS root@${MASTER_IP}:/tmp
+
+	$SSH_OVER_MASTER scp $DEBS $COMPUTE_NODE:/tmp
+	$SSH_OVER_MASTER ssh $COMPUTE_NODE dpkg -i $DEBS
+
+	sshpass -p${FUEL_PASSWD} scp single_node_test_short.sh root@${MASTER_IP}:/tmp
+	$SSH_OVER_MASTER scp /tmp/single_node_test_short.sh $COMPUTE_NODE:/tmp
+}
+
+function download_debs() {
+	pushd /tmp >/dev/null
+	rm -f *.deb >/dev/null
+	aptitude download libibverbs1 librdmacm1 libaio1 fio >/dev/null
+	popd >/dev/null
+	echo /tmp/*.deb
+}
+
+DEBS=`download_debs`
+OPENRC=`get_openrc`
+source $OPENRC
+rm $OPENRC
+
+boot_vm
+prepare_vm
+
+
diff --git a/scripts/show_disk_delta.py b/scripts/show_disk_delta.py
index 634b201..754e7a8 100644
--- a/scripts/show_disk_delta.py
+++ b/scripts/show_disk_delta.py
@@ -1,7 +1,7 @@
 import os
 import sys
 import time
-# import pprint
+import pprint
 import threading
 
 
@@ -36,8 +36,11 @@
     return {key: (val - obj2[key]) for key, val in obj1.items()}
 
 
-def run_tool(cmd, suppress_console=True):
-    os.system(" ".join(cmd) + " >/dev/null 2>&1 ")
+def run_tool(cmd, suppress_console=False):
+    s_cmd = " ".join(cmd)
+    if suppress_console:
+        s_cmd += " >/dev/null 2>&1 "
+    os.system(s_cmd)
 
 devices = sys.argv[1].split(',')
 cmd = sys.argv[2:]
@@ -47,6 +50,7 @@
 
 rstats = read_dstats()
 prev_stats = {device: rstats[device] for device in devices}
+begin_stats = prev_stats  # keep the first snapshot for the final cumulative diff
 
 th.start()
 
@@ -58,15 +62,16 @@
     rstats = read_dstats()
     new_stats = {device: rstats[device] for device in devices}
 
-    print "Delta writes complete =",
+    # print "Delta writes complete =",
     for device in devices:
         delta = new_stats[device][wr_compl] - prev_stats[device][wr_compl]
-        print device, delta,
-    print
+        # print device, delta,
+    # print
 
     prev_stats = new_stats
 
     if not th.is_alive():
         break
 
-# pprint.pprint(diff_stats(stat2, stat1))
+for device in devices:
+    pprint.pprint(diff_stats(new_stats[device], begin_stats[device]))
diff --git a/scripts/single_node_test_short.sh b/scripts/single_node_test_short.sh
new file mode 100644
index 0000000..384e8bd
--- /dev/null
+++ b/scripts/single_node_test_short.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+set -x
+
+TEST_FILE=$1
+OUT_FILE=$2
+NUM_CYCLES=7
+
+function run_tests(){
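+	# each run is preceded by "sync; echo 3 > /proc/sys/vm/drop_caches" to flush page, dentry and inode caches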
+	OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G"
+
+	sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency 1
+
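+	# rewrite the whole test file twice so its blocks are fully allocated before the timed cycles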
+	sync ; echo 3 > /proc/sys/vm/drop_caches ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240
+	sync ; echo 3 > /proc/sys/vm/drop_caches ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240
+
+	for cycle in $(seq $NUM_CYCLES) ; do
+		for conc in 1 4 8 ; do
+			sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency $conc
+		done
+
+		for conc in 1 4 8 ; do
+			sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randread  --blocksize 4k -d --concurrency $conc
+		done
+
+		sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randwrite --blocksize 4k -s --concurrency 1
+
+		sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a read      --blocksize 2m -d --concurrency 1
+		sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a write     --blocksize 2m -d --concurrency 1
+	done
+}
+
+run_tests "$FILE_1" 2>&1 | tee "$OUT_FILE"
+
+