refactor postprocessing code
diff --git a/assumptions_check.py b/assumptions_check.py
new file mode 100644
index 0000000..e17586e
--- /dev/null
+++ b/assumptions_check.py
@@ -0,0 +1,98 @@
+import sys
+
+import numpy as np
+import matplotlib.pyplot as plt
+from numpy.polynomial.chebyshev import chebfit, chebval
+
+from disk_perf_test_tool.tests.io_results_loader import load_data, filter_data
+
+
+def linearity_plot(plt, data, types):
+    fields = 'blocksize_b', 'iops_mediana', 'iops_stddev'
+
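+    # map short test codes (e.g. 'rrd') to readable labels ('rand read direct')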
+    names = {}
+    for tp1 in ('rand', 'seq'):
+        for oper in ('read', 'write'):
+            for sync in ('sync', 'direct', 'async'):
+                sq = (tp1, oper, sync)
+                name = "{0} {1} {2}".format(*sq)
+                names["".join(word[0] for word in sq)] = name
+
+    for tp in types:
+        filtered_data = filter_data('linearity_test_' + tp, fields)
+        x = []
+        y = []
+        e = []
+
+        for sz, med, dev in sorted(filtered_data(data)):
+            # per-I/O time in ms: median and worst case (3 sigma); true
+            # division, not floor division, or high-IOPS points collapse to 0
+            iotime_ms = 1000. / med
+            iotime_max = 1000. / (med - dev * 3)
+
+            x.append(sz / 1024)
+            y.append(iotime_ms)
+            e.append(iotime_max - iotime_ms)
+
+        plt.errorbar(x, y, e, linestyle='None', marker='o', label=names[tp])
+    plt.legend(loc=2)
+
+
+def th_plot(data, tt):
+    fields = 'concurence', 'iops_mediana', 'lat_mediana'
+    conc_4k = filter_data('concurrence_test_' + tt, fields, blocksize='4k')
+    filtered_data = sorted(list(conc_4k(data)))
+
+    x, iops, lat = zip(*filtered_data)
+
+    _, ax1 = plt.subplots()
+
+    xnew = np.linspace(min(x), max(x), 50)
+    # plt.plot(xnew, power_smooth, 'b-', label='iops')
+    ax1.plot(x, iops, 'b*')
+
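+    # smooth the measured IOPS(concurrency) points with a Chebyshev fit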
+    for degree in (3,):
+        c = chebfit(x, iops, degree)
+        vals = chebval(xnew, c)
+        ax1.plot(xnew, vals, 'g--')
+
+    # ax1.set_xlabel('thread count')
+    # ax1.set_ylabel('iops')
+
+    # ax2 = ax1.twinx()
+    # lat = [i / 1000 for i in lat]
+    # ax2.plot(x, lat, 'r*')
+
+    # tck = splrep(x, lat, s=0.0)
+    # power_smooth = splev(xnew, tck)
+    # ax2.plot(xnew, power_smooth, 'r-', label='lat')
+
+    # xp = xnew[0]
+    # yp = power_smooth[0]
+    # for _x, _y in zip(xnew[1:], power_smooth[1:]):
+    #     if _y >= 100:
+    #         xres = (_y - 100.) / (_y - yp) * (_x - xp) + xp
+    #         ax2.plot([xres, xres], [min(power_smooth), max(power_smooth)], 'g--')
+    #         break
+    #     xp = _x
+    #     yp = _y
+
+    # ax2.plot([min(x), max(x)], [20, 20], 'g--')
+    # ax2.plot([min(x), max(x)], [100, 100], 'g--')
+
+    # ax2.set_ylabel("lat ms")
+    # plt.legend(loc=2)
+
+
+def main(argv):
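+    # expects the raw results file produced by the io test as argv[1]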
+    data = list(load_data(open(argv[1]).read()))
+    # linearity_plot(data)
+    th_plot(data, 'rws')
+    # th_plot(data, 'rrs')
+    plt.show()
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
diff --git a/koder.yaml b/koder.yaml
index 21dea2e..5a719cd 100644
--- a/koder.yaml
+++ b/koder.yaml
@@ -1,5 +1,15 @@
-explicit_nodes:
-    "ssh://192.168.152.43": testnode
+clouds:
+    fuel:
+        id: 3
+        url: http://172.16.52.112:8000/
+        creds: admin:admin@admin
+        ssh_creds: root:test37
+        openstack_env: test
+
+discover: fuel
+
+# explicit_nodes:
+#     "ssh://192.168.152.43": testnode
 
 sensors:
     receiver_uri: udp://192.168.152.1:5699
@@ -8,16 +18,26 @@
         testnode: system-cpu, block-io
 
 tests:
-    - io:
-        cfg: tests/io_task_test.cfg
-        params:
-            SOME_OPT: 12
-            FILENAME: /tmp/xxx.bin
-    - pgbench:
-         num_clients: "4,8,12"
-         transactions_per_client: "1,2,3"
+    start_test_nodes:
+        openstack:
+            vm_params:
+                count: x1
+                img_name: disk_io_perf
+                flavor_name: disk_io_perf.256
+                keypair_name: disk_io_perf
+                network_zone_name: novanetwork
+                flt_ip_pool: nova
+                creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
+
+        internal_tests:
+            - io:
+                cfg: tests/io_task_test.cfg
+                params:
+                    SOME_OPT: 12
+                    FILENAME: /tmp/xxx.bin
 
 logging:
     extra_logs: 1
+
 charts_img_path: tmp/charts
-output_dest: results.html
\ No newline at end of file
+output_dest: results.html
diff --git a/run_test.py b/run_test.py
index 3cbdae2..3f078c2 100755
--- a/run_test.py
+++ b/run_test.py
@@ -10,8 +10,9 @@
 import collections
 
 from concurrent.futures import ThreadPoolExecutor
-import formatters
+
 import report
+# import formatters
 
 import utils
 import ssh_utils
@@ -37,6 +38,9 @@
                                   "%H:%M:%S")
     ch.setFormatter(formatter)
 
+    # logger.setLevel(logging.INFO)
+    # logger.addHandler(logging.FileHandler('log.txt'))
+
 
 def format_result(res, formatter):
     data = "\n{0}\n".format("=" * 80)
@@ -217,6 +221,7 @@
 
     if 'start_test_nodes' in cfg['tests']:
         params = cfg['tests']['start_test_nodes']['openstack']
+
     for new_node in start_vms.launch_vms(params):
         new_node.roles.append('testnode')
         ctx.nodes.append(new_node)
@@ -308,11 +313,13 @@
 
     stages = [
         discover_stage,
-        connect_stage,
+        log_nodes_statistic,
         complete_log_nodes_statistic,
-        # deploy_sensors_stage,
+        connect_stage,
+        # complete_log_nodes_statistic,
+        deploy_sensors_stage,
         run_tests_stage,
-        report_stage
+        # report_stage
     ]
 
     load_config(opts.config_file)
@@ -322,8 +329,7 @@
     ctx.build_meta['build_descrption'] = opts.build_description
     ctx.build_meta['build_type'] = opts.build_type
     ctx.build_meta['username'] = opts.username
-    logger.setLevel(logging.INFO)
-    logger.addHandler(logging.FileHandler('log.txt'))
+
     try:
         for stage in stages:
             logger.info("Start {0.__name__} stage".format(stage))
diff --git a/scripts/linearity.py b/scripts/linearity.py
deleted file mode 100644
index fe3bca9..0000000
--- a/scripts/linearity.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import re
-import sys
-from collections import defaultdict
-
-from disk_perf_test_tool.utils import ssize_to_b
-from disk_perf_test_tool.tests import disk_test_agent
-from disk_perf_test_tool.scripts.postprocessing import data_stat
-
-
-def filter_data(data, *params, **filters):
-    for result in data:
-        for k, v in filters.items():
-            if v == result.get(k):
-                yield map(result.get, params)
-
-
-raw_data = open(sys.argv[1]).read()
-data = list(disk_test_agent.parse_output(raw_data))[0]
-
-processed_data = defaultdict(lambda: [])
-pref = len("linearity_test_rrd")
-
-for key, val in data['res'].items():
-    val['blocksize'] = key[pref:].split('th')[0]
-
-    info = key[pref - 3:]
-    sz = info[3:].split("th")[0]
-    sinfo = info[:3]
-
-    if val['iops'] != []:
-        med, dev = map(int, data_stat.med_dev(val['iops']))
-        sdata = "{0:>4} ~ {1:>2}".format(med, dev)
-        processed_data[sinfo].append([sdata, sz, med, dev])
-    else:
-        processed_data[sinfo].append(["None", sz, "None", "None"])
-
-
-def sort_func(x):
-    return ssize_to_b(x[1])
-
-
-for sinfo, iops_sz in sorted(processed_data.items()):
-    for siops, sz, _, _ in sorted(iops_sz, key=sort_func):
-        print "{0} {1:>6} {2}".format(sinfo, sz, siops)
-
-
-import math
-import matplotlib.pyplot as plt
-
-
-prep = lambda x: x
-max_xz = 10000000
-
-
-def add_plt(plt, processed_data, flt, marker):
-    x = []
-    y = []
-    e = []
-
-    for sinfo, iops_sz in sorted(processed_data.items()):
-        if sinfo == flt:
-            for siops, sz, med, dev in sorted(iops_sz, key=sort_func):
-                if ssize_to_b(sz) < max_xz:
-                    iotime_us = 1000. // med
-                    iotime_max = 1000. // (med - dev * 3)
-                    x.append(prep(ssize_to_b(sz) / 1024))
-                    y.append(prep(iotime_us))
-                    e.append(prep(iotime_max) - prep(iotime_us))
-
-    plt.errorbar(x, y, e, linestyle='None', marker=marker)
-    plt.plot([x[0], x[-1]], [y[0], y[-1]])
-
-add_plt(plt, processed_data, 'rwd', '*')
-add_plt(plt, processed_data, 'rws', '^')
-add_plt(plt, processed_data, 'rrd', '+')
-
-plt.show()
diff --git a/scripts/postprocessing/data_stat.py b/scripts/postprocessing/data_stat.py
deleted file mode 100644
index 1323272..0000000
--- a/scripts/postprocessing/data_stat.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import math
-import itertools
-
-
-def med_dev(vals):
-    med = sum(vals) / len(vals)
-    dev = ((sum(abs(med - i) ** 2.0 for i in vals) / len(vals)) ** 0.5)
-    return med, dev
-
-
-def round_deviation(med_dev):
-    med, dev = med_dev
-
-    if dev < 1E-7:
-        return med_dev
-
-    dev_div = 10.0 ** (math.floor(math.log10(dev)) - 1)
-    dev = int(dev / dev_div) * dev_div
-    med = int(med / dev_div) * dev_div
-    return (type(med_dev[0])(med),
-            type(med_dev[1])(dev))
-
-
-def groupby_globally(data, key_func):
-    grouped = {}
-    grouped_iter = itertools.groupby(data, key_func)
-
-    for (bs, cache_tp, act, conc), curr_data_it in grouped_iter:
-        key = (bs, cache_tp, act, conc)
-        grouped.setdefault(key, []).extend(curr_data_it)
-
-    return grouped
-
-
-def read_data_agent_result(fname):
-    data = []
-    with open(fname) as fc:
-        block = None
-        for line in fc:
-            if line.startswith("{'__meta__':"):
-                block = line
-            elif block is not None:
-                block += line
-
-            if block is not None:
-                if block.count('}') == block.count('{'):
-                    data.append(eval(block))
-                    block = None
-    return data
diff --git a/start_vms.py b/start_vms.py
index f7cb09f..7317fd0 100644
--- a/start_vms.py
+++ b/start_vms.py
@@ -88,6 +88,7 @@
 
 def launch_vms(config):
     creds = config['vm_params']['creds']
+
     # if creds != 'ENV':
     #     raise ValueError("Only 'ENV' creds are supported")
 
diff --git a/statistic.py b/statistic.py
new file mode 100644
index 0000000..a662901
--- /dev/null
+++ b/statistic.py
@@ -0,0 +1,70 @@
+import math
+import itertools
+from numpy.polynomial.chebyshev import chebfit, chebval
+
+
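+# NOTE: despite its name, med_dev returns the arithmetic mean and std deviation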
+def med_dev(vals):
+    med = sum(vals) / len(vals)
+    dev = ((sum(abs(med - i) ** 2.0 for i in vals) / len(vals)) ** 0.5)
+    return med, dev
+
+
+def round_deviation(med_dev):
+    med, dev = med_dev
+
+    if dev < 1E-7:
+        return med_dev
+
+    dev_div = 10.0 ** (math.floor(math.log10(dev)) - 1)
+    dev = int(dev / dev_div) * dev_div
+    med = int(med / dev_div) * dev_div
+    return (type(med_dev[0])(med),
+            type(med_dev[1])(dev))
+
+
+def groupby_globally(data, key_func):
+    grouped = {}
+    grouped_iter = itertools.groupby(data, key_func)
+
+    for (bs, cache_tp, act, conc), curr_data_it in grouped_iter:
+        key = (bs, cache_tp, act, conc)
+        grouped.setdefault(key, []).extend(curr_data_it)
+
+    return grouped
+
+
+def approximate_curve(x, y, xnew, curved_coef):
+    """returns ynew - y values of some curve approximation"""
+    return chebval(xnew, chebfit(x, y, curved_coef))
+
+
+def approximate_line(x, y, xnew, relative_dist=False):
+    """returns ynew - y values of linear approximation"""
+
+
+def difference(y, ynew):
+    """returns average and maximum relative and
+       absolute differences between y and ynew"""
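+    # NOTE: assumed implementation matching the docstring; returns
+    # (avg_rel, max_rel, avg_abs, max_abs)
+    absolute = [abs(v - vn) for v, vn in zip(y, ynew)]
+    relative = [d / abs(v) for d, v in zip(absolute, y) if v != 0]
+    return (sum(relative) / float(len(relative)), max(relative),
+            sum(absolute) / float(len(absolute)), max(absolute))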
+
+
+def calculate_distribution_properties(data):
+    """chi, etc"""
+
+
+def minimal_measurement_amount(data, max_diff, req_probability):
+    """
+    should return the number of measurements needed to get results (avg and
+    deviation) with an error below max_diff in at least req_probability% of cases
+    """
diff --git a/tests/disk_test_agent.py b/tests/disk_test_agent.py
index d626889..8a0d165 100644
--- a/tests/disk_test_agent.py
+++ b/tests/disk_test_agent.py
@@ -466,22 +466,6 @@
     raise ValueError("Unknown behcnmark {0}".format(binary_tp))
 
 
-def parse_output(out_err):
-    start_patt = r"(?ims)=+\s+RESULTS\(format=json\)\s+=+"
-    end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"
-
-    for block in re.split(start_patt, out_err)[1:]:
-        data, garbage = re.split(end_patt, block)
-        yield json.loads(data.strip())
-
-    start_patt = r"(?ims)=+\s+RESULTS\(format=eval\)\s+=+"
-    end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"
-
-    for block in re.split(start_patt, out_err)[1:]:
-        data, garbage = re.split(end_patt, block)
-        yield eval(data.strip())
-
-
 def parse_args(argv):
     parser = argparse.ArgumentParser(
         description="Run fio' and return result")
diff --git a/tests/io_results_loader.py b/tests/io_results_loader.py
new file mode 100644
index 0000000..2ccd9de
--- /dev/null
+++ b/tests/io_results_loader.py
@@ -0,0 +1,62 @@
+import re
+import json
+
+
+from disk_perf_test_tool.utils import ssize_to_b
+from disk_perf_test_tool import statistic  # replaces the removed data_stat module
+
+
+def parse_output(out_err):
+    start_patt = r"(?ims)=+\s+RESULTS\(format=json\)\s+=+"
+    end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"
+
+    for block in re.split(start_patt, out_err)[1:]:
+        data, garbage = re.split(end_patt, block)
+        yield json.loads(data.strip())
+
+    start_patt = r"(?ims)=+\s+RESULTS\(format=eval\)\s+=+"
+    end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"
+
+    for block in re.split(start_patt, out_err)[1:]:
+        data, garbage = re.split(end_patt, block)
+        yield eval(data.strip())
+
+
+def filter_data(name_prefix, fields_to_select, **filters):
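+    # returns a generator function which yields fields_to_select from every
+    # result whose jobname starts with name_prefix and matches all filters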
+    def closure(data):
+        for result in data:
+            if name_prefix is not None:
+                if not result['jobname'].startswith(name_prefix):
+                    continue
+
+            for k, v in filters.items():
+                if result.get(k) != v:
+                    break
+            else:
+                yield map(result.get, fields_to_select)
+    return closure
+
+
+def load_data(raw_data):
+    data = list(parse_output(raw_data))[0]
+
+    for key, val in data['res'].items():
+        if 'blocksize' not in val:
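+            # jobname is like 'linearity_test_rrd4kth1' -> blocksize '4k'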
+            val['blocksize'] = key.split('_')[2][3:].split('th')[0]
+
+        val['blocksize_b'] = ssize_to_b(val['blocksize'])
+
+        val['iops_mediana'], val['iops_stddev'] = \
+            statistic.med_dev(val['iops'])
+        val['bw_mediana'], val['bw_stddev'] = statistic.med_dev(val['bw_mean'])
+        val['lat_mediana'], val['lat_stddev'] = statistic.med_dev(val['lat'])
+        yield val
+
+
+def load_files(*fnames):
+    for fname in fnames:
+        for i in load_data(open(fname).read()):
+            yield i
diff --git a/tests/io_task_test.cfg b/tests/io_task_test.cfg
index a319fa0..c9fc2ab 100644
--- a/tests/io_task_test.cfg
+++ b/tests/io_task_test.cfg
@@ -1,14 +1,3 @@
-# [__warmup]
-# blocksize=4k
-# filename=/tmp/xxx.bin
-# rw=randwrite
-# direct=1
-# buffered=0
-# iodepth=1
-# size=1Gb
-# runtime=5
-# time_based
-
 [writetest * 3]
 numjobs=4
 wait_for_previous