Lots of fixes
diff --git a/TODO b/TODO
index 0feaffa..c7d6826 100644
--- a/TODO
+++ b/TODO
@@ -1,4 +1,12 @@
+Reports
 Unit tests
 Auto-integration with OpenStack
 Reports
-Unify input and output parameter names
+Add extrapolated speeds to the report
+
+Statistical processing:
+	async calculation
+	measurement count calculation
+	mixed IOPS calculation
+
+
diff --git a/chart/__init__.py b/chart/__init__.py
index 91f24c6..cd49ed9 100644
--- a/chart/__init__.py
+++ b/chart/__init__.py
@@ -1,9 +1,8 @@
-# Patch MARKER constant
-
 import sys
-from GChartWrapper import constants
-import GChartWrapper.GChart
 
+from GChartWrapper import constants
+
+# Patch MARKER constant
 constants.MARKERS += 'E'
-print sys.modules['GChartWrapper.GChart']
+
 sys.modules['GChartWrapper.GChart'].MARKERS += 'E'
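
Why MARKERS is patched twice: presumably GChartWrapper.GChart binds its own
copy of MARKERS at import time (``from X import NAME`` copies the binding),
so rebinding constants.MARKERS alone never reaches it. A minimal sketch of
the effect, with stand-in module names:

    import sys
    import types

    a = types.ModuleType('a')        # plays GChartWrapper.constants
    a.MARKERS = 'ABCD'
    sys.modules['a'] = a

    b = types.ModuleType('b')        # plays GChartWrapper.GChart
    b.MARKERS = a.MARKERS            # what ``from a import MARKERS`` does
    sys.modules['b'] = b

    a.MARKERS += 'E'                 # rebinds a.MARKERS to a new string
    print a.MARKERS                  # ABCDE
    print b.MARKERS                  # ABCD - stale copy, needs its own patch
    sys.modules['b'].MARKERS += 'E'  # the second patch from the diff above
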
diff --git a/chart/charts.py b/chart/charts.py
index 549b820..7c49abd 100644
--- a/chart/charts.py
+++ b/chart/charts.py
@@ -1,5 +1,5 @@
-import hashlib
 import os
+import hashlib
 import threading
 
 from GChartWrapper import VerticalBarGroup
@@ -13,10 +13,9 @@
 constants.MARKERS += 'E'  # append E marker to available markers
 
 
-def save_image(chart, img_path):
-    t = threading.Thread(target=chart.save, kwargs={'fname': img_path})
-    t.daemon = True
-    t.start()
+def get_top_top_dir(path):
+    top_top_dir = os.path.dirname(os.path.dirname(path))
+    return path[len(top_top_dir) + 1:]
 
 
 def render_vertical_bar(title, legend, bars_data, bars_dev_top,
@@ -106,10 +105,11 @@
     bar.scale(*scale)
     img_name = hashlib.md5(str(bar)).hexdigest() + ".png"
     img_path = os.path.join(cfg_dict['charts_img_path'], img_name)
+
     if not os.path.exists(img_path):
-        save_image(bar, img_path)
-        return str(bar)
-    return img_path
+        bar.save(img_path)
+
+    return get_top_top_dir(img_path)
 
 
 def render_lines(title, legend, dataset, scale_x, width=700, height=400):
@@ -129,6 +129,6 @@
     img_name = hashlib.md5(str(line)).hexdigest() + ".png"
     img_path = os.path.join(cfg_dict['charts_img_path'], img_name)
     if not os.path.exists(img_path):
-        save_image(line, img_path)
-        return str(line)
-    return img_path
+        line.save(img_path)
+
+    return get_top_top_dir(img_path)
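
get_top_top_dir() strips everything above the grandparent directory, so the
chart renderers now return a path relative to the run's var dir instead of
an absolute one, which is exactly what the generated report.html needs for
its <img src='charts/...'> references. A quick illustration:

    import os

    def get_top_top_dir(path):
        # drop the prefix above the grandparent dir, keep the relative tail
        top_top_dir = os.path.dirname(os.path.dirname(path))
        return path[len(top_top_dir) + 1:]

    img = '/tmp/perf_tests/rotund_elfriede/charts/6fa7da0da5f789f40c0adc40fb125521.png'
    print get_top_top_dir(img)  # charts/6fa7da0da5f789f40c0adc40fb125521.png
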
diff --git a/config.py b/config.py
index bb8fab3..cfe5438 100644
--- a/config.py
+++ b/config.py
@@ -7,7 +7,7 @@
 cfg_dict = {}
 
 
-def load_config(file_name):
+def load_config(file_name, explicit_folder=None):
     global cfg_dict
     first_load = len(cfg_dict) == 0
     cfg_dict.update(yaml.load(open(file_name).read()))
@@ -15,19 +15,25 @@
     if first_load:
         var_dir = cfg_dict.get('internal', {}).get('var_dir_root', '/tmp')
 
-    while True:
-        dr = os.path.join(var_dir, pet_generate(2, "_"))
-        if not os.path.exists(dr):
-            break
+    if explicit_folder is None:
+        while True:
+            dr = os.path.join(var_dir, pet_generate(2, "_"))
+            if not os.path.exists(dr):
+                break
+    else:
+        dr = explicit_folder
 
     cfg_dict['var_dir'] = dr
-    os.makedirs(cfg_dict['var_dir'])
+    if not os.path.exists(cfg_dict['var_dir']):
+        os.makedirs(cfg_dict['var_dir'])
 
     def in_var_dir(fname):
         return os.path.join(cfg_dict['var_dir'], fname)
 
-    cfg_dict['charts_img_path'] = in_var_dir('charts')
-    os.makedirs(cfg_dict['charts_img_path'])
+    charts_img_path = in_var_dir('charts')
+    cfg_dict['charts_img_path'] = charts_img_path
+    if not os.path.exists(charts_img_path):
+        os.makedirs(charts_img_path)
 
     cfg_dict['vm_ids_fname'] = in_var_dir('os_vm_ids')
     cfg_dict['html_report_file'] = in_var_dir('report.html')
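
The new explicit_folder argument lets a run reuse an existing results
directory instead of generating a fresh pet-named one, and both makedirs
calls are now guarded, so pointing at an existing folder no longer raises
OSError. A usage sketch (paths are illustrative):

    import config

    # fresh run: a random /tmp/perf_tests/<pet_name> dir is created
    config.load_config('config.yaml')

    # re-processing: reuse an existing results folder as-is
    config.load_config('config.yaml',
                       explicit_folder='/tmp/perf_tests/rotund_elfriede')
    print config.cfg_dict['charts_img_path']  # .../rotund_elfriede/charts
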
diff --git a/config.yaml b/config.yaml
index b0e6cc1..b2d80ea 100644
--- a/config.yaml
+++ b/config.yaml
@@ -1,18 +1,30 @@
 clouds:
     fuel:
-        ext_ip: 172.16.53.3
+        # ext_ip: 172.16.53.3
         url: http://172.16.52.112:8000/
         creds: admin:admin@admin
         ssh_creds: root:test37
         openstack_env: test
 
-discover: fuel
+    openstack:
+        OS_TENANT_NAME: admin
+        OS_USERNAME: admin
+        OS_PASSWORD: admin
+        OS_AUTH_URL: http://172.16.53.3:5000/v2.0/
+
+    ceph: local
+
+discover: fuel, ceph
+
+explicit_nodes:
+    "ssh://root@172.16.52.112:3022:/home/koder/.ssh/id_rsa": testnode
+
 
 internal:
     var_dir_root: /tmp/perf_tests
 
 tests:
-    start_test_nodes:
+    - start_test_nodes:
         openstack:
             creds: clouds
             vm_params:
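
Note that YAML parses ``discover: fuel, ceph`` as the single string
'fuel, ceph', not as a list, while discover() below iterates over its
argument; presumably the runner splits the string first, roughly like this
(a sketch of the assumed normalization, not code from this repo):

    # assumption: done by the caller before nodes.discover.discover()
    discover_opt = 'fuel, ceph'  # cfg_dict['discover'] as YAML returns it
    clusters = [name.strip() for name in discover_opt.split(',') if name.strip()]
    print clusters               # ['fuel', 'ceph']
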
diff --git a/fuel_rest_api.py b/fuel_rest_api.py
index ee5cd9c..23f7289 100644
--- a/fuel_rest_api.py
+++ b/fuel_rest_api.py
@@ -1,6 +1,7 @@
 import re
 import json
 import time
+import logging
 import urllib2
 
 from functools import partial, wraps
@@ -12,12 +13,7 @@
 from keystoneclient import exceptions
 
 
-logger = None
-
-
-def set_logger(log):
-    global logger
-    logger = log
+logger = logging.getLogger("io-perf-tool.fuel_api")
 
 
 class Urllib2HTTP(object):
@@ -27,7 +23,7 @@
 
     allowed_methods = ('get', 'put', 'post', 'delete', 'patch', 'head')
 
-    def __init__(self, root_url, headers=None, echo=False):
+    def __init__(self, root_url, headers=None):
         """
         """
         if root_url.endswith('/'):
@@ -36,7 +32,6 @@
             self.root_url = root_url
 
         self.headers = headers if headers is not None else {}
-        self.echo = echo
 
     def host(self):
         return self.root_url.split('/')[2]
@@ -53,8 +48,7 @@
         else:
             data_json = json.dumps(params)
 
-        if self.echo and logger is not None:
-            logger.debug("HTTP: {} {}".format(method.upper(), url))
+        logger.debug("HTTP: {} {}".format(method.upper(), url))
 
         request = urllib2.Request(url,
                                   data=data_json,
@@ -65,8 +59,7 @@
         request.get_method = lambda: method.upper()
         response = urllib2.urlopen(request)
 
-        if self.echo and logger is not None:
-            logger.debug("HTTP Responce: {}".format(response.code))
+        logger.debug("HTTP Responce: {}".format(response.code))
 
         if response.code < 200 or response.code > 209:
             raise IndexError(url)
@@ -85,8 +78,8 @@
 
 
 class KeystoneAuth(Urllib2HTTP):
-    def __init__(self, root_url, creds, headers=None, echo=False):
-        super(KeystoneAuth, self).__init__(root_url, headers, echo)
+    def __init__(self, root_url, creds, headers=None):
+        super(KeystoneAuth, self).__init__(root_url, headers)
         admin_node_ip = urlparse.urlparse(root_url).hostname
         self.keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)
         self.keystone = keystoneclient(
@@ -99,10 +92,9 @@
             self.keystone.authenticate()
             self.headers['X-Auth-Token'] = self.keystone.auth_token
         except exceptions.AuthorizationFailure:
-            if logger is not None:
-                logger.warning(
-                    'Cant establish connection to keystone with url %s',
-                    self.keystone_url)
+            logger.warning(
+                'Cannot establish connection to keystone with url %s',
+                self.keystone_url)
 
     def do(self, method, path, params=None):
         """Do request. If gets 401 refresh token"""
@@ -110,9 +102,8 @@
             return super(KeystoneAuth, self).do(method, path, params)
         except urllib2.HTTPError as e:
             if e.code == 401:
-                if logger is not None:
-                    logger.warning(
-                        'Authorization failure: {0}'.format(e.read()))
+                logger.warning(
+                    'Authorization failure: {0}'.format(e.read()))
                 self.refresh_token()
                 return super(KeystoneAuth, self).do(method, path, params)
             else:
@@ -377,8 +368,7 @@
         data['id'] = node.id
         data['pending_addition'] = True
 
-        if logger is not None:
-            logger.debug("Adding node %s to cluster..." % node.id)
+        logger.debug("Adding node %s to cluster..." % node.id)
 
         self.add_node_call([data])
         self.nodes.append(node)
@@ -461,9 +451,6 @@
     """Get cluster id by name"""
     for cluster in get_all_clusters(conn):
         if cluster.name == name:
-            if logger is not None:
-                logger.debug('cluster name is %s' % name)
-                logger.debug('cluster id is %s' % cluster.id)
             return cluster.id
 
     raise ValueError("Cluster {0} not found".format(name))
diff --git a/nodes/discover.py b/nodes/discover.py
index a59f365..6c02fe6 100644
--- a/nodes/discover.py
+++ b/nodes/discover.py
@@ -1,15 +1,15 @@
 import logging
+import urlparse
+
 import ceph
 import openstack
-
 from utils import parse_creds
 from scripts import connector
-import urlparse
 
 logger = logging.getLogger("io-perf-tool")
 
 
-def discover(discover, clusters_info):
+def discover(ctx, discover, clusters_info):
     nodes_to_run = []
     for cluster in discover:
         if cluster == "openstack":
@@ -36,15 +36,17 @@
                                                           cluster_info)
             nodes_to_run.extend(os_nodes)
 
-        elif cluster == "fuel" or cluster == "fuel+openstack":
+        elif cluster == "fuel":
             cluster_info = clusters_info['fuel']
             cluster_name = cluster_info['openstack_env']
             url = cluster_info['url']
             creds = cluster_info['creds']
             ssh_creds = cluster_info['ssh_creds']
+
             # if user:password format is used
             if not ssh_creds.startswith("ssh://"):
                 ip_port = urlparse.urlparse(url).netloc
+
                 if ':' in ip_port:
                     ip = ip_port.split(":")[0]
                 else:
@@ -52,24 +54,13 @@
 
                 ssh_creds = "ssh://{0}@{1}".format(ssh_creds, ip)
 
-            env = cluster_info['openstack_env']
-            nodes, _, openrc_dict = connector.discover_fuel_nodes(url, creds, cluster_name)
+            dfunc = connector.discover_fuel_nodes
+            nodes, clean_data, openrc_dict = dfunc(url, creds, cluster_name)
 
-            if 'openstack' not in clusters_info:
-                clusters_info['openstack'] = {}
-
-                for key in openrc_dict:
-                        if key == 'OS_AUTH_URL':
-                            url = urlparse.urlparse(openrc_dict[key])
-                            clusters_info['openstack'][key] = \
-                                url.scheme + '://' + \
-                                cluster_info['ext_ip'] \
-                                + ':' +\
-                                str(url.port) +\
-                                url.path
-                        else:
-                            clusters_info['openstack'][key] = \
-                                openrc_dict[key]
+            ctx.fuel_openstack_creds = {'name': openrc_dict['username'],
+                                        'passwd': openrc_dict['password'],
+                                        'tenant': openrc_dict['tenant_name'],
+                                        'auth_url': openrc_dict['os_auth_url']}
 
             nodes_to_run.extend(nodes)
 
diff --git a/nodes/fuel.py b/nodes/fuel.py
index 9b1312e..82e7542 100644
--- a/nodes/fuel.py
+++ b/nodes/fuel.py
@@ -1,34 +1,37 @@
-import logging
+# import logging
 
 
-from node import Node
-import fuel_rest_api
+# import fuel_rest_api
+# from node import Node
 
 
-logger = logging.getLogger("io-perf-tool")
+# logger = logging.getLogger("io-perf-tool")
 
 
-def discover_fuel_nodes(root_url, credentials, cluster_name):
-    """Discover Fuel nodes"""
-    assert credentials.count(':') >= 2
-    user, passwd_tenant = credentials.split(":", 1)
-    passwd, tenant = passwd_tenant.rsplit(":", 1)
-    creds = dict(
-        username=user,
-        password=passwd,
-        tenant_name=tenant,
-    )
+# def discover_fuel_nodes(root_url, credentials, cluster_name):
+#     """Discover Fuel nodes"""
+#     assert credentials.count(':') >= 2
+#     user, passwd_tenant = credentials.split(":", 1)
+#     passwd, tenant = passwd_tenant.rsplit(":", 1)
+#     creds = dict(
+#         username=user,
+#         password=passwd,
+#         tenant_name=tenant,
+#     )
 
-    connection = fuel_rest_api.KeystoneAuth(root_url, creds)
-    fi = fuel_rest_api.FuelInfo(connection)
+#     connection = fuel_rest_api.KeystoneAuth(root_url, creds)
+#     fi = fuel_rest_api.FuelInfo(connection)
 
-    clusters_id = fuel_rest_api.get_cluster_id(connection, cluster_name)
+#     logger.debug("wtf")
+#     clusters_id = fuel_rest_api.get_cluster_id(connection, cluster_name)
+#     logger.debug("wtf2")
 
-    nodes = []
+#     nodes = []
 
-    for node in fi.nodes:
-        if node.cluster == clusters_id:
-            nodes.append(node)
-    res = [Node(n.ip, n.get_roles()) for n in nodes]
-    logger.debug("Found %s fuel nodes for env %r" % (len(res), cluster_name))
-    return res
+#     for node in fi.nodes:
+#         if node.cluster == clusters_id:
+#             nodes.append(node)
+
+#     res = [Node(n.ip, n.get_roles()) for n in nodes]
+#     logger.debug("Found %s fuel nodes for env %r" % (len(res), cluster_name))
+#     return res
diff --git a/report.py b/report.py
index f609805..ec42035 100644
--- a/report.py
+++ b/report.py
@@ -4,10 +4,10 @@
 
 import formatters
 from chart import charts
-from io_results_loader import filter_data, load_files
-from meta_info import total_lab_info, collect_lab_data
 from utils import ssize_to_b
-from statistic import med_dev, approximate_curve
+from statistic import med_dev
+from io_results_loader import filter_data
+from meta_info import total_lab_info, collect_lab_data
 
 
 OPERATIONS = (('async', ('randwrite asynchronous', 'randread asynchronous',
@@ -157,18 +157,6 @@
     open(dest, 'w').write(html)
 
 
-# def render_html_results(ctx):
-#     charts = []
-#     for res in ctx.results:
-#         if res[0] == "io":
-#             charts.append(build_io_chart(res))
-
-#     bars = build_vertical_bar(ctx.results)
-#     lines = build_lines_chart(ctx.results)
-
-    # render_html(bars + lines, dest)
-
-
 def io_chart(title, concurence, latv, iops_or_bw, iops_or_bw_dev,
              legend):
     bar_data, bar_dev = iops_or_bw, iops_or_bw_dev
@@ -201,8 +189,8 @@
         charts_url = []
 
         name_filters = [
-            #('hdd_test_rws4k', ('concurence', 'lat', 'iops')),
-            #('hdd_test_rrs4k', ('concurence', 'lat', 'iops')),
+            # ('hdd_test_rws4k', ('concurence', 'lat', 'iops')),
+            # ('hdd_test_rrs4k', ('concurence', 'lat', 'iops')),
             ('hdd_test_rrd4k', ('concurence', 'lat', 'iops')),
             ('hdd_test_swd1m', ('concurence', 'lat', 'bw')),
         ]
@@ -210,9 +198,11 @@
         for name_filter, fields in name_filters:
             th_filter = filter_data(name_filter, fields)
 
-            data_iter = sorted(th_filter(io_test_suite_res.values()))
+            data = sorted(th_filter(io_test_suite_res.values()))
+            if len(data) == 0:
+                continue
 
-            concurence, latv, iops_or_bw_v = zip(*data_iter)
+            concurence, latv, iops_or_bw_v = zip(*data)
             iops_or_bw_v, iops_or_bw_dev_v = zip(*map(med_dev, iops_or_bw_v))
             latv, _ = zip(*map(med_dev, latv))
 
@@ -221,51 +211,17 @@
                            fields[2])
 
             charts_url.append(url)
-            # _, ax1 = plt.subplots()
-            #
-            # ax1.plot(concurence, iops_or_bw_v)
-            # ax1.errorbar(concurence, iops_or_bw_v, iops_or_bw_dev_v,
-            #              linestyle='None',
-            #              label="iops_or_bw_v",
-            #              marker="*")
-            #
-            # # ynew = approximate_line(ax, ay, ax, True)
-            #
-            # ax2 = ax1.twinx()
-            #
-            # ax2.errorbar(concurence,
-            #              [med_dev(lat)[0] / 1000 for lat in latv],
-            #              [med_dev(lat)[1] / 1000 for lat in latv],
-            #              linestyle='None',
-            #              label="iops_or_bw_v",
-            #              marker="*")
-            # ax2.plot(concurence, [med_dev(lat)[0] / 1000 for lat in latv])
-            # plt.show()
-            # exit(0)
 
-            # bw_only = []
-
-            # for conc, _, _, (bw, _) in data:
-            #     bw_only.append(bw)
-            #     bw_d_per_th.append((bw / conc, 0))
-
-            # lines = [(zip(*lat_d)[0], 'msec', 'rr', 'lat'), (bw_sum, None, None, 'bw_sum')]
-
-            # chart_url = charts.render_vertical_bar(
-            #                 chart_name, ["bw"], [bw_d_per_th], label_x="KBps",
-            #                 scale_x=ordered_data.keys(),
-            #                 lines=lines)
-
-            # charts_url.append(str(chart_url))
-
-        render_html(charts_url, path, lab_info)
+        if len(charts_url) != 0:
+            render_html(charts_url, path, lab_info)
 
 
 def main(args):
-    make_io_report(results=[('a','b')],
+    make_io_report(results=[('a', 'b')],
                    path=os.path.dirname(args[0]),
                    lab_url='http://172.16.52.112:8000',
-                   creds={'username': 'admin', 'password': 'admin', "tenant_name": 'admin'})
+                   creds={'username': 'admin', 'password': 'admin',
+                          "tenant_name": 'admin'})
     return 0
 
 
diff --git a/results/perf-1-compute-hdd/charts/6fa7da0da5f789f40c0adc40fb125521.png b/results/perf-1-compute-hdd/charts/6fa7da0da5f789f40c0adc40fb125521.png
new file mode 100644
index 0000000..208f28c
--- /dev/null
+++ b/results/perf-1-compute-hdd/charts/6fa7da0da5f789f40c0adc40fb125521.png
Binary files differ
diff --git a/results/perf-1-compute-hdd/charts/9cfbfff2077f35018f164fa9cb4cc9f7.png b/results/perf-1-compute-hdd/charts/9cfbfff2077f35018f164fa9cb4cc9f7.png
new file mode 100644
index 0000000..3cf19a8
--- /dev/null
+++ b/results/perf-1-compute-hdd/charts/9cfbfff2077f35018f164fa9cb4cc9f7.png
Binary files differ
diff --git a/results/perf-1-compute-hdd/log.txt b/results/perf-1-compute-hdd/log.txt
new file mode 100644
index 0000000..4eb4324
--- /dev/null
+++ b/results/perf-1-compute-hdd/log.txt
@@ -0,0 +1,109 @@
+12:30:22 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+12:30:22 - INFO   - io-perf-tool - Start discover_stage stage
+12:30:22 - INFO   - io-perf-tool - Start log_nodes_statistic stage
+12:30:22 - INFO   - io-perf-tool - Found 1 nodes total
+12:30:22 - DEBUG - io-perf-tool - Found 1 nodes with role testnode
+12:30:22 - INFO   - io-perf-tool - Start connect_stage stage
+12:30:22 - INFO   - io-perf-tool - Connecting to nodes
+12:30:22 - INFO   - io-perf-tool - All nodes connected successfully
+12:30:22 - INFO   - io-perf-tool - Start deploy_sensors_stage stage
+12:30:22 - INFO   - io-perf-tool - Start run_tests_stage stage
+12:30:22 - INFO   - io-perf-tool - Starting io tests
+12:30:22 - DEBUG - io-perf-tool - Starting io test on ssh://root@172.16.52.112:3022:/home/koder/.ssh/id_rsa node
+12:30:22 - DEBUG - io-perf-tool - Run preparation for ssh://root@172.16.52.112:3022:/home/koder/.ssh/id_rsa
+12:30:22 - DEBUG - io-perf-tool - SSH: Exec 'which fio'
+12:30:23 - DEBUG - io-perf-tool - SSH: Exec 'dd if=/dev/zero of=/opt/xxx.bin bs=1048576 count=10240'
+12:31:26 - DEBUG - io-perf-tool - Run test for ssh://root@172.16.52.112:3022:/home/koder/.ssh/id_rsa
+12:31:26 - DEBUG - io-perf-tool - Waiting on barrier
+12:31:26 - INFO   - io-perf-tool - Test will takes about 2:12:35
+12:31:27 - DEBUG - io-perf-tool - SSH: Exec 'env python2 /tmp/disk_test_agent.py --type fio --params NUM_ROUNDS=7 FILENAME=/opt/xxx.bin --json -'
+14:46:24 - INFO   - io-perf-tool - Start store_raw_results_stage stage
+14:46:24 - INFO   - io-perf-tool - Start console_report_stage stage
+14:46:24 - INFO   - io-perf-tool - Start report_stage stage
+14:46:24 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+14:46:24 - INFO   - io-perf-tool - Start shut_down_vms_stage stage
+14:46:24 - INFO   - io-perf-tool - Start disconnect_stage stage
+14:46:24 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:43:04 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:43:04 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:43:04 - INFO   - io-perf-tool - Start console_report_stage stage
+17:43:04 - INFO   - io-perf-tool - Start report_stage stage
+17:43:37 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:43:37 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:43:37 - INFO   - io-perf-tool - Start console_report_stage stage
+17:43:37 - INFO   - io-perf-tool - Start report_stage stage
+17:43:37 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:43:37 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:43:37 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:46:54 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:46:54 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:46:54 - INFO   - io-perf-tool - Start report_stage stage
+17:46:54 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:46:54 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:46:54 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:47:54 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:47:54 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:47:54 - INFO   - io-perf-tool - Start report_stage stage
+17:47:54 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:47:54 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:47:54 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:49:42 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:49:42 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:49:42 - INFO   - io-perf-tool - Start report_stage stage
+17:49:42 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:49:42 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:49:42 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:50:59 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:50:59 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:50:59 - INFO   - io-perf-tool - Start report_stage stage
+17:50:59 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:50:59 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:50:59 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:52:01 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:52:01 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:52:01 - INFO   - io-perf-tool - Start report_stage stage
+17:52:01 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:52:01 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:52:01 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+17:52:37 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+17:52:37 - INFO   - io-perf-tool - Start load_data_from_file stage
+17:52:37 - INFO   - io-perf-tool - Start report_stage stage
+17:52:37 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+17:52:37 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+17:52:37 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+18:45:15 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+18:45:15 - INFO   - io-perf-tool - Start load_data_from_file stage
+18:45:15 - INFO   - io-perf-tool - Start report_stage stage
+18:45:15 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+18:45:15 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+18:45:15 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+18:45:19 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+18:45:19 - INFO   - io-perf-tool - Start load_data_from_file stage
+18:45:20 - INFO   - io-perf-tool - Start report_stage stage
+18:45:20 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+18:45:20 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+18:45:20 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+18:46:01 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+18:46:01 - INFO   - io-perf-tool - Start load_data_from_file stage
+18:46:01 - INFO   - io-perf-tool - Start report_stage stage
+18:46:02 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+18:46:02 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+18:46:02 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+18:48:51 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+18:48:51 - INFO   - io-perf-tool - Start load_data_from_file stage
+18:48:51 - INFO   - io-perf-tool - Start report_stage stage
+18:48:52 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+18:48:52 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+18:48:52 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+18:49:47 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+18:49:47 - INFO   - io-perf-tool - Start load_data_from_file stage
+18:49:47 - INFO   - io-perf-tool - Start report_stage stage
+18:49:47 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+18:49:47 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+18:49:47 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
+18:50:09 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/rotund_elfriede
+18:50:09 - INFO   - io-perf-tool - Start load_data_from_file stage
+18:50:09 - INFO   - io-perf-tool - Start report_stage stage
+18:50:10 - INFO   - io-perf-tool - Html report were stored in /tmp/perf_tests/rotund_elfriede/report.html
+18:50:10 - INFO   - io-perf-tool - Text report were stored in /tmp/perf_tests/rotund_elfriede/report.txt
+18:50:10 - INFO   - io-perf-tool - All info stotored into /tmp/perf_tests/rotund_elfriede
diff --git a/results/perf-1-compute-hdd/raw_results.yaml b/results/perf-1-compute-hdd/raw_results.yaml
new file mode 100644
index 0000000..6a6b4f6
--- /dev/null
+++ b/results/perf-1-compute-hdd/raw_results.yaml
@@ -0,0 +1,452 @@
+-   -   io
+    -   res: 
+            hdd_test_swd1mth40: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth40
+                iops: [160, 174, 172, 171, 167, 172, 169]
+                bw: [163850, 178577, 176845, 175673, 171042, 176273, 173693]
+                lat: [249262.59, 226253.83, 228928.36, 232426.25, 235542.05, 228821.3, 229864.66]
+                clat: [249221.16, 226213.61, 228885.42, 232385.36, 235498.86, 228780.83, 229823.97]
+            hdd_test_srd1mth120: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth120
+                iops: [438, 473, 421, 469, 477, 451, 474]
+                bw: [448702, 485077, 431596, 481014, 488770, 462393, 485464]
+                lat: [272341.19, 252221.05, 283448.97, 254310.42, 250329.45, 264582.62, 252028.58]
+                clat: [272341.0, 252220.88, 283448.78, 254310.25, 250329.27, 264582.44, 252028.41]
+            hdd_test_rwd4kth1: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_rwd4kth1
+                iops: [404, 406, 405, 410, 404, 407, 408]
+                bw: [1616, 1624, 1623, 1640, 1617, 1630, 1632]
+                lat: [2470.42, 2457.72, 2459.19, 2433.45, 2469.15, 2449.21, 2446.34]
+                clat: [2470.1, 2457.38, 2458.88, 2433.11, 2468.82, 2448.89, 2446.02]
+            hdd_test_rrd4kth5: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth5
+                iops: [262, 287, 277, 292, 279, 290, 270]
+                bw: [1048, 1149, 1108, 1170, 1119, 1161, 1082]
+                lat: [19059.87, 17386.08, 18032.29, 17070.71, 17856.45, 17204.53, 18461.88]
+                clat: [19059.61, 17385.79, 18031.99, 17070.41, 17856.17, 17204.21, 18461.62]
+            hdd_test_rws4kth5: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth5
+                iops: [71, 74, 71, 73, 74, 73, 73]
+                bw: [285, 297, 287, 295, 296, 293, 295]
+                lat: [69964.5, 67239.55, 69604.95, 67606.77, 67311.6, 68136.38, 67596.02]
+                clat: [69964.09, 67239.18, 69604.56, 67606.38, 67311.23, 68136.01, 67595.64]
+            hdd_test_swd1mth1: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth1
+                iops: [177, 174, 173, 177, 175, 173, 173]
+                bw: [181320, 178232, 177891, 181685, 179892, 177816, 177999]
+                lat: [5644.35, 5742.4, 5753.36, 5633.08, 5689.31, 5755.67, 5749.82]
+                clat: [5629.54, 5727.65, 5738.58, 5618.43, 5674.7, 5740.86, 5735.11]
+            hdd_test_srd1mth80: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth80
+                iops: [488, 481, 474, 468, 489, 481, 489]
+                bw: [500256, 493161, 485776, 480072, 501654, 492882, 501021]
+                lat: [163227.78, 165575.86, 168155.67, 170139.34, 162830.11, 165750.03, 163235.84]
+                clat: [163227.61, 165575.69, 168155.52, 170139.17, 162829.92, 165749.88, 163235.67]
+            hdd_test_swd1mth120: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth120
+                iops: [201, 211, 205, 218, 200, 211, 216]
+                bw: [206004, 216334, 210607, 223384, 205479, 216865, 221524]
+                lat: [592248.31, 531388.81, 550845.62, 507271.84, 564501.19, 517807.72, 532421.38]
+                clat: [592203.56, 531346.5, 550801.75, 507227.88, 564458.19, 517765.69, 532375.44]
+            hdd_test_swd1mth5: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth5
+                iops: [151, 145, 152, 148, 151, 143, 152]
+                bw: [155361, 148761, 155975, 152010, 155283, 146807, 155969]
+                lat: [32873.79, 34407.42, 32816.2, 33670.63, 32944.83, 34865.57, 32814.71]
+                clat: [32857.0, 34389.8, 32799.38, 33653.61, 32928.36, 34848.3, 32798.34]
+            hdd_test_rws4kth15: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth15
+                iops: [80, 78, 83, 76, 82, 82, 78]
+                bw: [321, 312, 333, 307, 331, 329, 312]
+                lat: [186285.27, 190777.61, 179656.09, 194278.34, 180218.91, 181491.55, 191513.5]
+                clat: [186284.88, 190777.22, 179655.69, 194277.94, 180218.5, 181491.14, 191513.09]
+            hdd_test_rws4kth30: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth30
+                iops: [82, 77, 81, 82, 83, 76, 82]
+                bw: [328, 308, 326, 329, 333, 306, 329]
+                lat: [363845.31, 386380.12, 364771.66, 361253.5, 357521.56, 388269.97, 361152.19]
+                clat: [363844.91, 386379.72, 364771.25, 361253.06, 357521.12, 388269.56, 361151.78]
+            hdd_test_rws4kth10: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth10
+                iops: [82, 77, 83, 77, 82, 78, 83]
+                bw: [330, 311, 332, 309, 331, 313, 333]
+                lat: [120939.94, 128316.23, 120108.23, 128817.77, 120450.09, 127472.73, 119743.85]
+                clat: [120939.52, 128315.8, 120107.84, 128817.36, 120449.7, 127472.34, 119743.43]
+            hdd_test_srd1mth15: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth15
+                iops: [192, 176, 172, 181, 186, 186, 186]
+                bw: [197618, 180275, 177117, 185532, 190677, 190988, 190529]
+                lat: [77377.37, 85025.45, 86539.22, 82610.98, 80411.26, 80263.92, 80454.14]
+                clat: [77377.22, 85025.3, 86539.05, 82610.83, 80411.09, 80263.76, 80453.98]
+            hdd_test_srd1mth30: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth30
+                iops: [375, 421, 423, 325, 346, 405, 420]
+                bw: [384467, 431356, 433528, 333485, 355080, 414940, 431082]
+                lat: [79710.12, 71103.94, 70713.8, 92025.48, 86380.95, 73954.95, 71141.71]
+                clat: [79709.97, 71103.77, 70713.64, 92025.31, 86380.8, 73954.78, 71141.55]
+            hdd_test_srd1mth10: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth10
+                iops: [157, 174, 168, 166, 167, 175, 182]
+                bw: [161732, 178381, 172617, 170814, 171364, 179492, 186602]
+                lat: [63202.16, 57316.83, 59239.18, 59862.51, 59670.26, 56965.62, 54803.95]
+                clat: [63201.98, 57316.68, 59239.02, 59862.35, 59670.09, 56965.46, 54803.78]
+            hdd_test_rrd4kth30: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth30
+                iops: [469, 463, 462, 457, 460, 460, 457]
+                bw: [1879, 1855, 1850, 1828, 1843, 1842, 1828]
+                lat: [63604.93, 64526.77, 64710.67, 65465.62, 64910.79, 64978.81, 65426.57]
+                clat: [63604.78, 64526.62, 64710.52, 65465.47, 64910.63, 64978.67, 65426.43]
+            hdd_test_rrd4kth10: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth10
+                iops: [353, 353, 354, 358, 356, 349, 353]
+                bw: [1415, 1413, 1419, 1433, 1424, 1397, 1413]
+                lat: [28220.8, 28267.07, 28136.39, 27866.14, 28059.99, 28568.01, 28278.55]
+                clat: [28220.62, 28266.85, 28136.17, 27865.95, 28059.8, 28567.81, 28278.35]
+            hdd_test_rrd4kth15: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth15
+                iops: [395, 390, 394, 393, 399, 389, 392]
+                bw: [1582, 1560, 1576, 1574, 1599, 1559, 1570]
+                lat: [37844.47, 38370.14, 38009.38, 38071.2, 37473.45, 38400.98, 38149.96]
+                clat: [37844.32, 38369.99, 38009.23, 38071.05, 37473.29, 38400.84, 38149.82]
+            hdd_test_swd1mth20: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth20
+                iops: [151, 149, 154, 150, 155, 149, 150]
+                bw: [155326, 153496, 157881, 153975, 159391, 152655, 153698]
+                lat: [131614.89, 133148.73, 129457.04, 132566.56, 128049.26, 134008.36, 132869.22]
+                clat: [131585.36, 133118.5, 129427.01, 132534.31, 128020.7, 133976.36, 132838.5]
+            hdd_test_rws4kth120: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth120
+                iops: [79, 82, 81, 83, 77, 82, 77]
+                bw: [319, 328, 324, 332, 311, 329, 311]
+                lat: [1491354.75, 1415709.5, 1426159.62, 1390980.88, 1489516.88, 1403286.62, 1492233.62]
+                clat: [1491354.38, 1415709.12, 1426159.12, 1390980.5, 1489516.38, 1403286.12, 1492233.25]
+            hdd_test_rrd4kth120: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth120
+                iops: [478, 472, 484, 485, 484, 474, 488]
+                bw: [1913, 1889, 1938, 1941, 1936, 1898, 1952]
+                lat: [246951.8, 250621.23, 244948.47, 244863.33, 245347.81, 249996.78, 243654.67]
+                clat: [246951.64, 250621.08, 244948.31, 244863.19, 245347.66, 249996.62, 243654.53]
+            hdd_test_swd1mth15: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth15
+                iops: [149, 155, 149, 151, 153, 152, 148]
+                bw: [153004, 159443, 152738, 155260, 157541, 156277, 151712]
+                lat: [99993.16, 96187.08, 100372.34, 98703.3, 97320.62, 97898.12, 100343.73]
+                clat: [99967.08, 96163.2, 100345.82, 98680.35, 97293.82, 97873.88, 100317.48]
+            hdd_test_srd1mth1: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth1
+                iops: [180, 180, 181, 177, 180, 180, 180]
+                bw: [184751, 184931, 185614, 181816, 185235, 184429, 185272]
+                lat: [5539.57, 5534.02, 5513.71, 5628.99, 5524.93, 5549.08, 5523.87]
+                clat: [5539.29, 5533.76, 5513.46, 5628.72, 5524.68, 5548.82, 5523.62]
+            hdd_test_srd1mth5: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth5
+                iops: [137, 165, 154, 132, 141, 167, 159]
+                bw: [140534, 169331, 158458, 135958, 144873, 171172, 163514]
+                lat: [36426.63, 30212.38, 32287.75, 37629.91, 35317.55, 29885.17, 31288.82]
+                clat: [36426.43, 30212.16, 32287.56, 37629.69, 35317.33, 29884.99, 31288.62]
+            hdd_test_swd1mth80: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth80
+                iops: [203, 196, 202, 194, 188, 190, 194]
+                bw: [208326, 201104, 206881, 198828, 192591, 194757, 199225]
+                lat: [390070.41, 395894.09, 386283.0, 391240.69, 405315.09, 395800.53, 393988.16]
+                clat: [390023.34, 395848.88, 386237.84, 391197.75, 405272.56, 395758.59, 393945.19]
+            hdd_test_rrd4kth80: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth80
+                iops: [474, 478, 476, 469, 482, 477, 475]
+                bw: [1896, 1913, 1906, 1876, 1928, 1909, 1900]
+                lat: [166645.81, 165584.61, 166682.8, 169626.28, 164584.86, 166233.72, 167094.14]
+                clat: [166645.67, 165584.48, 166682.64, 169626.14, 164584.7, 166233.56, 167094.0]
+            hdd_test_rws4kth20: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth20
+                iops: [77, 82, 79, 82, 81, 82, 78]
+                bw: [311, 329, 319, 329, 326, 330, 312]
+                lat: [255892.94, 241595.78, 249261.97, 241525.91, 243757.88, 240959.8, 254951.75]
+                clat: [255892.52, 241595.38, 249261.56, 241525.5, 243757.47, 240959.38, 254951.34]
+            hdd_test_srd1mth20: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth20
+                iops: [286, 278, 252, 301, 266, 270, 253]
+                bw: [292956, 285286, 258565, 308458, 272386, 276708, 259966]
+                lat: [69842.21, 71653.4, 79006.03, 66292.67, 75043.28, 73879.11, 78661.34]
+                clat: [69842.04, 71653.23, 79005.87, 66292.51, 75043.12, 73878.95, 78661.18]
+            hdd_test_rrd4kth40: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth40
+                iops: [462, 463, 465, 457, 456, 460, 457]
+                bw: [1851, 1854, 1861, 1828, 1827, 1843, 1828]
+                lat: [85900.73, 86016.24, 85645.2, 87243.56, 87067.41, 86435.66, 87320.75]
+                clat: [85900.58, 86016.09, 85645.03, 87243.41, 87067.26, 86435.52, 87320.59]
+            hdd_test_rrd4kth20: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth20
+                iops: [419, 421, 429, 425, 423, 427, 424]
+                bw: [1679, 1685, 1716, 1702, 1692, 1709, 1697]
+                lat: [47528.56, 47403.04, 46551.27, 46879.47, 47227.83, 46707.35, 47008.94]
+                clat: [47528.41, 47402.89, 46551.11, 46879.32, 47227.68, 46707.2, 47008.79]
+            hdd_test_rws4kth80: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth80
+                iops: [75, 82, 78, 81, 82, 81, 76]
+                bw: [302, 329, 314, 327, 329, 325, 307]
+                lat: [1050249.5, 947348.38, 994411.31, 950421.94, 947885.5, 961421.5, 1013113.69]
+                clat: [1050249.12, 947347.94, 994410.88, 950421.5, 947885.0, 961421.06, 1013113.25]
+            hdd_test_rws4kth1: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth1
+                iops: [41, 41, 41, 41, 42, 41, 41]
+                bw: [166, 165, 167, 164, 168, 165, 167]
+                lat: [23983.87, 24110.1, 23813.04, 24365.01, 23743.05, 24174.92, 23816.97]
+                clat: [23983.49, 24109.79, 23812.69, 24364.66, 23742.73, 24174.54, 23816.61]
+            hdd_test_rws4kth40: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth40
+                iops: [78, 78, 83, 82, 77, 77, 83]
+                bw: [313, 312, 332, 331, 309, 311, 333]
+                lat: [506244.72, 507070.97, 476336.69, 476645.34, 512317.62, 507324.06, 473760.31]
+                clat: [506244.34, 507070.56, 476336.31, 476644.94, 512317.22, 507323.59, 473759.91]
+            hdd_test_rrd4kth1: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth1
+                iops: [143, 157, 149, 157, 152, 157, 155]
+                bw: [575, 629, 597, 631, 611, 628, 620]
+                lat: [6950.36, 6351.87, 6695.59, 6330.2, 6534.99, 6359.04, 6439.06]
+                clat: [6950.11, 6351.6, 6695.33, 6329.94, 6534.73, 6358.75, 6438.8]
+            hdd_test_swd1mth30: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth30
+                iops: [142, 155, 160, 167, 163, 157, 164]
+                bw: [145482, 158842, 164228, 171272, 167495, 161213, 168625]
+                lat: [210477.3, 191831.08, 185666.44, 178756.48, 181382.86, 187686.77, 181725.97]
+                clat: [210439.06, 191794.2, 185629.3, 178718.39, 181345.3, 187648.77, 181689.28]
+            hdd_test_swd1mth10: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth10
+                iops: [149, 154, 147, 150, 153, 148, 151]
+                bw: [152757, 158453, 150542, 153884, 156809, 152038, 155536]
+                lat: [66851.63, 64582.15, 67974.72, 66451.34, 65271.57, 67256.13, 65793.96]
+                clat: [66831.3, 64562.43, 67954.29, 66430.45, 65252.41, 67235.51, 65774.64]
+            hdd_test_srd1mth40: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth40
+                iops: [458, 467, 424, 440, 457, 477, 447]
+                bw: [469523, 478406, 435021, 451389, 468090, 488883, 458133]
+                lat: [87134.27, 85494.91, 94015.34, 90609.92, 87388.05, 83693.32, 89268.15]
+                clat: [87134.11, 85494.76, 94015.19, 90609.77, 87387.9, 83693.16, 89267.99]
+        __meta__: 
+            raw_cfg: 
+                '[defaults]\nwait_for_previous\ngroup_reporting\ntime_based\nbuffered=0\niodepth=1\n\nfilename={FILENAME}\nNUM_ROUNDS=7\n\nramp_time=5\nsize=10Gb\nruntime=30\n\n# ---------------------------------------------------------------------\n# check different thread count, sync mode. (latency, iops) = func(th_count)\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=4k\nrw=randwrite\nsync=1\nnumjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}\n\n# ---------------------------------------------------------------------\n# check different thread count, direct read mode. (latency, iops) = func(th_count)\n# also check iops for randread\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=4k\nrw=randread\ndirect=1\nnumjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}\n\n# ---------------------------------------------------------------------\n# check different thread count, direct read/write mode. (bw, iops) = func(th_count)\n# also check BW for seq read/write.\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=1m\nrw={% read, write %}\ndirect=1\nnumjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}\n\n# ---------------------------------------------------------------------\n# check IOPS randwrite.\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=4k\nrw=randwrite\ndirect=1\n'
+            params: 
+                NUM_ROUNDS: 7
+                FILENAME: /opt/xxx.bin
\ No newline at end of file
diff --git a/results/perf-1-compute-hdd/report.html b/results/perf-1-compute-hdd/report.html
new file mode 100644
index 0000000..ef326eb
--- /dev/null
+++ b/results/perf-1-compute-hdd/report.html
@@ -0,0 +1,12 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>Report</title>
+</head>
+
+<body>
+<div><ol><li><img src='charts/6fa7da0da5f789f40c0adc40fb125521.png'></li>
+<li><img src='charts/9cfbfff2077f35018f164fa9cb4cc9f7.png'></li></ol></div>
+</body>
+
+</html>
\ No newline at end of file
diff --git a/results/perf-1-compute-hdd/report.txt b/results/perf-1-compute-hdd/report.txt
new file mode 100644
index 0000000..9d4992f
--- /dev/null
+++ b/results/perf-1-compute-hdd/report.txt
@@ -0,0 +1,45 @@
++-------------+------+---------+-----------+--------+
+| Description | IOPS | BW KBps | Dev * 3 % | LAT ms |
++=============+======+=========+===========+========+
+| rrd4kth1    |  152 |     613 |         9 |      6 |
+| rrd4kth5    |  279 |    1119 |        11 |     17 |
+| rrd4kth10   |  353 |    1416 |         2 |     28 |
+| rrd4kth15   |  393 |    1574 |         2 |     38 |
+| rrd4kth20   |  424 |    1697 |         2 |     47 |
+| rrd4kth30   |  461 |    1846 |         2 |     64 |
+| rrd4kth40   |  460 |    1841 |         2 |     86 |
+| rrd4kth80   |  475 |    1904 |         2 |    166 |
+| rrd4kth120  |  480 |    1923 |         3 |    246 |
+| ---         |  --- |     --- |       --- |    --- |
+| rwd4kth1    |  406 |    1626 |         1 |      2 |
+| ---         |  --- |     --- |       --- |    --- |
+| rws4kth1    |   41 |     166 |         2 |     24 |
+| rws4kth5    |   72 |     292 |         4 |     68 |
+| rws4kth10   |   80 |     322 |         9 |    123 |
+| rws4kth15   |   79 |     320 |         9 |    186 |
+| rws4kth20   |   80 |     322 |         7 |    246 |
+| rws4kth30   |   80 |     322 |         9 |    369 |
+| rws4kth40   |   79 |     320 |         9 |    494 |
+| rws4kth80   |   79 |     319 |         9 |    980 |
+| rws4kth120  |   80 |     322 |         7 |   1444 |
+| ---         |  --- |     --- |       --- |    --- |
+| srd1mth1    |  179 |  184578 |         1 |      5 |
+| srd1mth5    |  150 |  154834 |        25 |     33 |
+| srd1mth10   |  169 |  174428 |        12 |     58 |
+| srd1mth15   |  182 |  187533 |        10 |     81 |
+| srd1mth20   |  272 |  279189 |        17 |     73 |
+| srd1mth30   |  387 |  397705 |        28 |     77 |
+| srd1mth40   |  452 |  464206 |        10 |     88 |
+| srd1mth80   |  481 |  493546 |         4 |    165 |
+| srd1mth120  |  457 |  469002 |        13 |    261 |
+| ---         |  --- |     --- |       --- |    --- |
+| swd1mth1    |  174 |  179262 |         2 |      5 |
+| swd1mth5    |  148 |  152880 |         6 |     33 |
+| swd1mth10   |  150 |  154288 |         5 |     66 |
+| swd1mth15   |  151 |  155139 |         5 |     98 |
+| swd1mth20   |  151 |  155203 |         4 |    131 |
+| swd1mth30   |  158 |  162451 |        14 |    188 |
+| swd1mth40   |  169 |  173707 |         7 |    233 |
+| swd1mth80   |  195 |  200244 |         8 |    394 |
+| swd1mth120  |  208 |  214313 |         9 |    542 |
++-------------+------+---------+-----------+--------+
diff --git a/results/usb_hdd/log.txt b/results/usb_hdd/log.txt
new file mode 100644
index 0000000..40af172
--- /dev/null
+++ b/results/usb_hdd/log.txt
@@ -0,0 +1,26 @@
+03:17:05 - INFO   - io-perf-tool - All info would be stored into /tmp/perf_tests/ungored_babara
+03:17:05 - INFO   - io-perf-tool - Start discover_stage stage
+03:17:05 - INFO   - io-perf-tool - Start log_nodes_statistic stage
+03:17:05 - INFO   - io-perf-tool - Found 1 nodes total
+03:17:05 - DEBUG - io-perf-tool - Found 1 nodes with role testnode
+03:17:05 - INFO   - io-perf-tool - Start connect_stage stage
+03:17:05 - INFO   - io-perf-tool - Connecting to nodes
+03:17:05 - INFO   - io-perf-tool - All nodes connected successfully
+03:17:05 - INFO   - io-perf-tool - Start deploy_sensors_stage stage
+03:17:05 - INFO   - io-perf-tool - Start run_tests_stage stage
+03:17:05 - INFO   - io-perf-tool - Starting io tests
+03:17:05 - DEBUG - io-perf-tool - Starting io test on ssh://koder:koder771@@127.0.0.1 node
+03:17:05 - DEBUG - io-perf-tool - Run preparation for ssh://koder:koder771@@127.0.0.1
+03:17:06 - DEBUG - io-perf-tool - SSH: Exec 'which fio'
+03:17:06 - DEBUG - io-perf-tool - SSH: Exec 'dd if=/dev/zero of=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin bs=1048576 count=10240'
+03:18:59 - DEBUG - io-perf-tool - Run test for ssh://koder:koder771@@127.0.0.1
+03:18:59 - DEBUG - io-perf-tool - Waiting on barrier
+03:18:59 - INFO   - io-perf-tool - Test will takes about 2:12:35
+03:18:59 - DEBUG - io-perf-tool - SSH: Exec 'env python2 /tmp/disk_test_agent.py --type fio --params NUM_ROUNDS=7 FILENAME=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin --json -'
+05:34:57 - INFO   - io-perf-tool - Start store_raw_results_stage stage
+05:34:57 - INFO   - io-perf-tool - Start console_report_stage stage
+05:34:57 - INFO   - io-perf-tool - Start report_stage stage
+05:34:57 - INFO   - io-perf-tool - Text report was stored in /tmp/perf_tests/ungored_babara/report.txt
+05:34:57 - INFO   - io-perf-tool - Start shut_down_vms_stage stage
+05:34:57 - INFO   - io-perf-tool - Start disconnect_stage stage
+05:34:57 - INFO   - io-perf-tool - All info stored in /tmp/perf_tests/ungored_babara
diff --git a/results/usb_hdd/raw_results.yaml b/results/usb_hdd/raw_results.yaml
new file mode 100644
index 0000000..6d45a36
--- /dev/null
+++ b/results/usb_hdd/raw_results.yaml
@@ -0,0 +1,452 @@
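+# Field meanings, as far as the fio config under __meta__ implies them
+# (treat as inference, not spec): each list holds one value per test round
+# (NUM_ROUNDS=7); 'bw' is bandwidth in KB/s, 'lat'/'clat' are total and
+# completion latency in microseconds, 'concurence' mirrors fio numjobs,
+# 'sync_mode' is 'd' for direct=1 and 's' for sync=1, and 'timings' is
+# [runtime, ramp_time] in seconds.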
+-   -   io
+    -   res: 
+            hdd_test_swd1mth40: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth40
+                iops: [105, 145, 127, 124, 105, 122, 109]
+                bw: [107893, 148789, 130439, 127263, 107984, 124948, 112555]
+                lat: [375066.53, 273705.81, 312135.53, 319384.53, 376148.97, 325759.97, 361182.31]
+                clat: [375007.91, 273649.44, 312076.34, 319325.84, 376091.69, 325702.38, 361123.75]
+            hdd_test_srd1mth120: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth120
+                iops: [158, 159, 166, 173, 167, 167, 169]
+                bw: [162507, 163010, 170981, 177790, 171275, 171870, 173625]
+                lat: [744070.25, 742432.19, 706168.94, 681298.69, 708267.62, 704216.81, 698211.88]
+                clat: [744070.0, 742431.94, 706168.69, 681298.44, 708267.38, 704216.56, 698211.62]
+            hdd_test_rwd4kth1: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_rwd4kth1
+                iops: [112, 123, 149, 161, 157, 164, 165]
+                bw: [450, 492, 596, 645, 631, 657, 661]
+                lat: [8877.02, 8121.73, 6698.91, 6195.59, 6331.68, 6076.8, 6044.41]
+                clat: [8876.59, 8121.3, 6698.45, 6195.07, 6331.21, 6076.32, 6043.92]
+            hdd_test_rrd4kth5: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth5
+                iops: [85, 86, 86, 87, 86, 86, 86]
+                bw: [340, 345, 347, 348, 344, 347, 346]
+                lat: [58704.49, 57792.17, 57554.85, 57388.99, 57998.39, 57433.83, 57678.13]
+                clat: [58704.05, 57791.74, 57554.36, 57388.57, 57997.91, 57433.34, 57677.67]
+            hdd_test_rws4kth5: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth5
+                iops: [34, 34, 34, 34, 34, 34, 34]
+                bw: [137, 138, 137, 139, 137, 137, 139]
+                lat: [145517.03, 144205.89, 145408.03, 143756.66, 144969.97, 145307.56, 142772.22]
+                clat: [145516.48, 144205.38, 145407.48, 143756.16, 144969.41, 145307.02, 142771.7]
+            hdd_test_swd1mth1: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth1
+                iops: [95, 94, 94, 94, 94, 94, 94]
+                bw: [97366, 96860, 96779, 97127, 96625, 96940, 96932]
+                lat: [10511.75, 10566.27, 10575.46, 10537.2, 10592.11, 10557.77, 10558.46]
+                clat: [10468.69, 10521.81, 10531.77, 10493.78, 10547.31, 10513.95, 10514.26]
+            hdd_test_srd1mth80: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth80
+                iops: [148, 153, 158, 156, 153, 160, 158]
+                bw: [151692, 156724, 162496, 160126, 157015, 164230, 162224]
+                lat: [534233.0, 517392.75, 499338.84, 507032.94, 516170.78, 493990.31, 499771.66]
+                clat: [534232.75, 517392.5, 499338.59, 507032.66, 516170.53, 493990.06, 499771.41]
+            hdd_test_swd1mth120: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth120
+                iops: [116, 142, 144, 124, 121, 132, 124]
+                bw: [118930, 145829, 147548, 127265, 124088, 135927, 127405]
+                lat: [1025413.25, 827922.44, 814388.75, 940374.5, 971605.06, 882674.88, 949472.88]
+                clat: [1025355.81, 827862.56, 814328.62, 940316.12, 971544.88, 882618.44, 949414.25]
+            hdd_test_swd1mth5: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth5
+                iops: [151, 147, 150, 133, 137, 136, 133]
+                bw: [154844, 151361, 153796, 136197, 140825, 139394, 136646]
+                lat: [33048.85, 33796.82, 33257.17, 37566.2, 36330.36, 36702.46, 37434.75]
+                clat: [32990.97, 33735.27, 33198.08, 37505.76, 36270.16, 36642.41, 37376.55]
+            hdd_test_rws4kth15: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth15
+                iops: [35, 35, 36, 35, 36, 35, 35]
+                bw: [141, 142, 144, 143, 144, 142, 140]
+                lat: [419827.44, 418355.41, 412684.53, 414331.69, 412641.34, 418578.97, 422547.66]
+                clat: [419826.88, 418354.88, 412683.97, 414331.19, 412640.78, 418578.44, 422547.12]
+            hdd_test_rws4kth30: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth30
+                iops: [34, 34, 34, 33, 34, 35, 36]
+                bw: [139, 138, 139, 135, 139, 141, 145]
+                lat: [851475.19, 854181.0, 847631.75, 869914.25, 848062.88, 832771.06, 813126.06]
+                clat: [851474.69, 854180.5, 847631.19, 869913.69, 848062.25, 832770.5, 813125.56]
+            hdd_test_rws4kth10: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth10
+                iops: [34, 35, 35, 35, 35, 35, 34]
+                bw: [137, 142, 141, 142, 141, 140, 139]
+                lat: [289637.41, 278670.94, 281827.5, 280160.75, 281804.59, 283823.5, 284971.53]
+                clat: [289636.84, 278670.34, 281826.97, 280160.22, 281804.06, 283822.91, 284971.0]
+            hdd_test_srd1mth15: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth15
+                iops: [170, 170, 170, 170, 169, 170, 170]
+                bw: [174429, 174411, 174905, 174742, 173865, 174197, 174156]
+                lat: [88054.62, 88052.15, 87804.64, 87892.23, 88331.83, 88159.85, 88181.89]
+                clat: [88054.4, 88051.94, 87804.43, 87892.02, 88331.61, 88159.66, 88181.67]
+            hdd_test_srd1mth30: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth30
+                iops: [136, 116, 106, 124, 122, 121, 128]
+                bw: [139511, 118903, 109102, 127276, 125137, 124635, 131880]
+                lat: [219711.06, 257222.5, 280296.47, 240494.25, 244719.33, 245545.17, 232045.8]
+                clat: [219710.8, 257222.22, 280296.16, 240493.98, 244719.06, 245544.89, 232045.55]
+            hdd_test_srd1mth10: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth10
+                iops: [159, 161, 161, 159, 161, 161, 161]
+                bw: [163714, 165408, 165051, 163758, 165034, 165886, 165315]
+                lat: [62544.5, 61899.84, 62031.3, 62523.12, 62040.04, 61720.03, 61932.24]
+                clat: [62544.27, 61899.61, 62031.08, 62522.9, 62039.81, 61719.81, 61932.02]
+            hdd_test_rrd4kth30: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth30
+                iops: [108, 109, 109, 109, 109, 108, 109]
+                bw: [435, 437, 437, 438, 437, 434, 438]
+                lat: [273883.81, 272697.97, 273037.22, 272646.16, 273106.78, 274951.75, 272044.94]
+                clat: [273883.41, 272697.59, 273036.84, 272645.78, 273106.41, 274951.34, 272044.53]
+            hdd_test_rrd4kth10: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth10
+                iops: [98, 98, 100, 100, 99, 98, 99]
+                bw: [394, 392, 402, 400, 397, 395, 396]
+                lat: [101016.6, 101717.11, 99280.92, 99826.44, 100580.12, 100984.79, 100799.66]
+                clat: [101016.17, 101716.7, 99280.52, 99825.99, 100579.7, 100984.38, 100799.27]
+            hdd_test_rrd4kth15: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth15
+                iops: [105, 105, 105, 104, 104, 105, 105]
+                bw: [421, 422, 420, 417, 419, 422, 422]
+                lat: [141965.73, 141790.3, 142330.92, 143405.28, 142748.47, 141609.78, 141561.55]
+                clat: [141965.33, 141789.88, 142330.5, 143404.88, 142748.08, 141609.38, 141561.14]
+            hdd_test_swd1mth20: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth20
+                iops: [135, 140, 130, 134, 125, 132, 139]
+                bw: [139168, 143839, 133801, 137426, 128849, 135635, 142659]
+                lat: [147055.97, 141970.05, 152486.52, 148646.81, 158481.55, 150673.88, 143078.95]
+                clat: [146995.77, 141910.28, 152425.36, 148589.34, 158421.92, 150612.77, 143020.75]
+            hdd_test_rws4kth120: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth120
+                iops: [35, 35, 35, 35, 36, 35, 35]
+                bw: [142, 142, 142, 143, 145, 143, 143]
+                lat: [3353833.0, 3149759.0, 3151540.25, 3134665.5, 3074830.25, 3130676.75, 3121878.75]
+                clat: [3353832.25, 3149758.5, 3151539.5, 3134664.75, 3074829.75, 3130676.25, 3121878.25]
+            hdd_test_rrd4kth120: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 120
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth120
+                iops: [119, 120, 120, 121, 120, 121, 120]
+                bw: [476, 482, 482, 484, 483, 485, 483]
+                lat: [968035.0, 972234.5, 974273.5, 969773.44, 970889.38, 967950.88, 972564.88]
+                clat: [968034.56, 972234.12, 974273.12, 969773.06, 970889.0, 967950.5, 972564.5]
+            hdd_test_swd1mth15: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 15
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth15
+                iops: [133, 140, 137, 158, 125, 136, 143]
+                bw: [137031, 143704, 140820, 162059, 128416, 139475, 146815]
+                lat: [111952.41, 106685.93, 108824.16, 94581.61, 119328.67, 109901.46, 104422.56]
+                clat: [111892.44, 106627.74, 108764.02, 94523.07, 119272.63, 109842.91, 104362.23]
+            hdd_test_srd1mth1: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth1
+                iops: [97, 97, 97, 97, 97, 97, 97]
+                bw: [99727, 99864, 100014, 99711, 100068, 99956, 99806]
+                lat: [10263.29, 10249.58, 10234.19, 10265.01, 10228.67, 10240.41, 10255.61]
+                clat: [10262.85, 10249.16, 10233.77, 10264.57, 10228.25, 10239.98, 10255.2]
+            hdd_test_srd1mth5: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 5
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth5
+                iops: [130, 130, 129, 130, 129, 130, 129]
+                bw: [133144, 133328, 132694, 133400, 132936, 133179, 132752]
+                lat: [38450.97, 38397.21, 38579.73, 38354.87, 38508.18, 38438.96, 38563.57]
+                clat: [38450.69, 38396.93, 38579.48, 38354.59, 38507.92, 38438.68, 38563.31]
+            hdd_test_swd1mth80: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth80
+                iops: [129, 123, 145, 153, 107, 127, 125]
+                bw: [132816, 126383, 148689, 157365, 110078, 130476, 128364]
+                lat: [614422.0, 636398.06, 546438.19, 515600.72, 734548.0, 619536.5, 627088.88]
+                clat: [614365.5, 636338.31, 546380.5, 515541.19, 734489.25, 619476.38, 627031.44]
+            hdd_test_rrd4kth80: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth80
+                iops: [116, 116, 118, 118, 116, 116, 116]
+                bw: [466, 465, 473, 475, 466, 467, 467]
+                lat: [669399.56, 679100.31, 666550.19, 662553.31, 676849.56, 675408.56, 676391.44]
+                clat: [669399.12, 679099.88, 666549.75, 662552.94, 676849.19, 675408.12, 676391.06]
+            hdd_test_rws4kth20: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth20
+                iops: [35, 35, 36, 34, 35, 34, 34]
+                bw: [143, 141, 145, 138, 140, 138, 138]
+                lat: [553134.62, 558376.38, 545084.12, 571753.0, 563008.81, 569984.75, 571231.25]
+                clat: [553134.06, 558375.81, 545083.56, 571752.5, 563008.31, 569984.19, 571230.69]
+            hdd_test_srd1mth20: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth20
+                iops: [145, 122, 137, 121, 171, 172, 146]
+                bw: [148882, 125791, 140862, 124373, 176011, 176921, 150427]
+                lat: [136756.89, 162314.12, 144940.86, 164203.52, 116297.33, 115730.1, 135656.83]
+                clat: [136756.64, 162313.89, 144940.61, 164203.25, 116297.12, 115729.89, 135656.58]
+            hdd_test_rrd4kth40: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth40
+                iops: [111, 112, 111, 110, 112, 111, 111]
+                bw: [446, 449, 446, 441, 449, 446, 444]
+                lat: [355492.03, 353707.62, 355797.09, 360247.22, 353819.62, 356157.81, 357968.97]
+                clat: [355491.66, 353707.22, 355796.69, 360246.81, 353819.22, 356157.41, 357968.56]
+            hdd_test_rrd4kth20: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 20
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth20
+                iops: [109, 107, 106, 107, 107, 108, 108]
+                bw: [437, 430, 424, 431, 431, 432, 434]
+                lat: [181916.44, 185327.86, 187631.55, 184738.78, 184936.75, 184551.91, 183614.64]
+                clat: [181916.05, 185327.45, 187631.17, 184738.38, 184936.34, 184551.48, 183614.27]
+            hdd_test_rws4kth80: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 80
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth80
+                iops: [35, 35, 35, 35, 35, 35, 35]
+                bw: [143, 143, 143, 143, 143, 143, 143]
+                lat: [2210007.0, 2118441.25, 2125228.0, 2128654.5, 2121707.25, 2125617.25, 2125574.25]
+                clat: [2210006.5, 2118440.75, 2125227.5, 2128654.0, 2121706.5, 2125616.5, 2125573.5]
+            hdd_test_rws4kth1: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth1
+                iops: [20, 20, 20, 20, 20, 20, 20]
+                bw: [83, 83, 82, 82, 82, 83, 83]
+                lat: [47929.32, 47856.71, 48242.46, 48323.69, 48311.01, 47969.01, 47746.06]
+                clat: [47928.57, 47855.95, 48241.66, 48322.78, 48309.9, 47968.21, 47745.31]
+            hdd_test_rws4kth40: 
+                rw: randwrite
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: s
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_rws4kth40
+                iops: [34, 37, 37, 37, 35, 35, 35]
+                bw: [139, 148, 151, 150, 142, 142, 142]
+                lat: [1117152.0, 1052526.38, 1034599.5, 1037669.94, 1097834.0, 1099318.38, 1101577.12]
+                clat: [1117151.5, 1052525.88, 1034599.0, 1037669.38, 1097833.38, 1099317.75, 1101576.5]
+            hdd_test_rrd4kth1: 
+                rw: randread
+                blocksize: 4k
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 1
+                timings: [30, 5]
+                jobname: hdd_test_rrd4kth1
+                iops: [69, 84, 84, 84, 84, 84, 84]
+                bw: [276, 337, 339, 339, 339, 337, 337]
+                lat: [14458.61, 11846.5, 11764.3, 11756.94, 11786.95, 11835.68, 11844.28]
+                clat: [14458.04, 11845.98, 11763.74, 11756.4, 11786.38, 11835.14, 11843.73]
+            hdd_test_swd1mth30: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 30
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth30
+                iops: [140, 137, 144, 117, 120, 150, 137]
+                bw: [143862, 141065, 148231, 119956, 123841, 154147, 140681]
+                lat: [213424.62, 216748.52, 206302.14, 254895.95, 246975.12, 198406.42, 217475.83]
+                clat: [213365.42, 216691.48, 206246.2, 254836.0, 246916.7, 198348.56, 217415.95]
+            hdd_test_swd1mth10: 
+                rw: write
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 10
+                timings: [30, 5]
+                jobname: hdd_test_swd1mth10
+                iops: [150, 145, 139, 155, 144, 142, 124]
+                bw: [154040, 149094, 142555, 159163, 148217, 145492, 127506]
+                lat: [66453.27, 68589.56, 71715.65, 64245.41, 69003.03, 70286.77, 80188.92]
+                clat: [66393.21, 68528.28, 71655.97, 64184.57, 68945.55, 70230.63, 80130.2]
+            hdd_test_srd1mth40: 
+                rw: read
+                blocksize: 1m
+                slat: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+                sync_mode: d
+                concurence: 40
+                timings: [30, 5]
+                jobname: hdd_test_srd1mth40
+                iops: [138, 162, 157, 152, 165, 147, 153]
+                bw: [141870, 166350, 161057, 156377, 169421, 151106, 156917]
+                lat: [287373.0, 245129.75, 253322.08, 260845.11, 240698.88, 269809.94, 260059.66]
+                clat: [287372.75, 245129.53, 253321.83, 260844.88, 240698.64, 269809.69, 260059.42]
+        __meta__: 
+            raw_cfg: 
+                '[defaults]\nwait_for_previous\ngroup_reporting\ntime_based\nbuffered=0\niodepth=1\n\nfilename={FILENAME}\nNUM_ROUNDS=7\n\nramp_time=5\nsize=10Gb\nruntime=30\n\n# ---------------------------------------------------------------------\n# check different thread count, sync mode. (latency, iops) = func(th_count)\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=4k\nrw=randwrite\nsync=1\nnumjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}\n\n# ---------------------------------------------------------------------\n# check different thread count, direct read mode. (latency, iops) = func(th_count)\n# also check iops for randread\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=4k\nrw=randread\ndirect=1\nnumjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}\n\n# ---------------------------------------------------------------------\n# check different thread count, direct read/write mode. (bw, iops) = func(th_count)\n# also check BW for seq read/write.\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=1m\nrw={% read, write %}\ndirect=1\nnumjobs={% 1, 5, 10, 15, 20, 30, 40, 80, 120 %}\n\n# ---------------------------------------------------------------------\n# check IOPS randwrite.\n# ---------------------------------------------------------------------\n[hdd_test_{TEST_SUMM} * {NUM_ROUNDS}]\nblocksize=4k\nrw=randwrite\ndirect=1\n'
+            params: 
+                NUM_ROUNDS: 7
+                FILENAME: /media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin
\ No newline at end of file
diff --git a/results/usb_hdd/report.txt b/results/usb_hdd/report.txt
new file mode 100644
index 0000000..b9d96f2
--- /dev/null
+++ b/results/usb_hdd/report.txt
@@ -0,0 +1,45 @@
++-------------+------+---------+-----------+--------+
+| Description | IOPS | BW KBps | Dev * 3 % | LAT ms |
++=============+======+=========+===========+========+
+| rrd4kth1    |   81 |     329 |        19 |     12 |
+| rrd4kth5    |   86 |     345 |         2 |     57 |
+| rrd4kth10   |   98 |     396 |         2 |    100 |
+| rrd4kth15   |  104 |     420 |         1 |    142 |
+| rrd4kth20   |  107 |     431 |         2 |    184 |
+| rrd4kth30   |  108 |     436 |         1 |    273 |
+| rrd4kth40   |  111 |     445 |         1 |    356 |
+| rrd4kth80   |  116 |     468 |         2 |    672 |
+| rrd4kth120  |  120 |     482 |         1 |    970 |
+| ---         |  --- |     --- |       --- |    --- |
+| rwd4kth1    |  147 |     590 |        40 |      6 |
+| ---         |  --- |     --- |       --- |    --- |
+| rws4kth1    |   20 |      82 |         2 |     48 |
+| rws4kth5    |   34 |     137 |         2 |    144 |
+| rws4kth10   |   34 |     140 |         3 |    282 |
+| rws4kth15   |   35 |     142 |         2 |    416 |
+| rws4kth20   |   34 |     140 |         5 |    561 |
+| rws4kth30   |   34 |     139 |         6 |    845 |
+| rws4kth40   |   35 |     144 |         9 |   1077 |
+| rws4kth80   |   35 |     143 |         0 |   2136 |
+| rws4kth120  |   35 |     142 |         2 |   3159 |
+| ---         |  --- |     --- |       --- |    --- |
+| srd1mth1    |   97 |   99878 |         0 |     10 |
+| srd1mth5    |  129 |  133061 |         0 |     38 |
+| srd1mth10   |  160 |  164880 |         1 |     62 |
+| srd1mth15   |  169 |  174386 |         0 |     88 |
+| srd1mth20   |  144 |  149038 |        39 |    139 |
+| srd1mth30   |  121 |  125206 |        21 |    245 |
+| srd1mth40   |  153 |  157585 |        16 |    259 |
+| srd1mth80   |  155 |  159215 |         7 |    509 |
+| srd1mth120  |  165 |  170151 |         9 |    712 |
+| ---         |  --- |     --- |       --- |    --- |
+| swd1mth1    |   94 |   96947 |         0 |     10 |
+| swd1mth5    |  141 |  144723 |        15 |     35 |
+| swd1mth10   |  142 |  146581 |        19 |     70 |
+| swd1mth15   |  138 |  142617 |        20 |    107 |
+| swd1mth20   |  133 |  137339 |        10 |    148 |
+| swd1mth30   |  135 |  138826 |        25 |    222 |
+| swd1mth40   |  119 |  122838 |        33 |    334 |
+| swd1mth80   |  129 |  133453 |        32 |    613 |
+| swd1mth120  |  129 |  132427 |        23 |    915 |
++-------------+------+---------+-----------+--------+
diff --git a/run_test.py b/run_test.py
index 4e3f6ef..b424b76 100755
--- a/run_test.py
+++ b/run_test.py
@@ -54,8 +54,11 @@
     def format(self, record):
         levelname = record.levelname
 
+        prn_name = ' ' * (6 - len(levelname)) + levelname
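+        # pad the level name to a fixed width of 6 so log columns line up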
         if levelname in self.colors:
-            record.levelname = self.colors[levelname](levelname)
+            record.levelname = self.colors[levelname](prn_name)
+        else:
+            record.levelname = prn_name
 
         return logging.Formatter.format(self, record)
 
@@ -65,7 +68,7 @@
     sh = logging.StreamHandler()
     sh.setLevel(level)
 
-    log_format = '%(asctime)s - %(levelname)-6s - %(name)s - %(message)s'
+    log_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
     colored_formatter = ColoredFormatter(log_format,
                                          "%H:%M:%S")
 
@@ -80,6 +83,10 @@
         fh.setLevel(logging.DEBUG)
         logger.addHandler(fh)
 
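+    # attach the console handler to the fuel_api sub-logger, but keep it at
+    # WARNING so its lower-level chatter stays out of the output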
+    logger_api = logging.getLogger("io-perf-tool.fuel_api")
+    logger_api.addHandler(sh)
+    logger_api.setLevel(logging.WARNING)
+
 
 def format_result(res, formatter):
     data = "\n{0}\n".format("=" * 80)
@@ -140,7 +147,7 @@
         res_q.put(exc)
 
 
-def run_tests(config, nodes):
+def run_tests(test_block, nodes):
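+    # test_block maps a test-runner name ("io", "pgbench", ...) to its params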
     tool_type_mapper = {
         "io": IOPerfTest,
         "pgbench": PgBenchTest,
@@ -151,64 +158,43 @@
 
     res_q = Queue.Queue()
 
-    for test_block in config:
-        for name, params in test_block.items():
-            logger.info("Starting {0} tests".format(name))
+    for name, params in test_block.items():
+        logger.info("Starting {0} tests".format(name))
 
-            threads = []
-            barrier = utils.Barrier(len(test_nodes))
-            for node in test_nodes:
-                msg = "Starting {0} test on {1} node"
-                logger.debug(msg.format(name, node.conn_url))
-                test = tool_type_mapper[name](params, res_q.put)
-                th = threading.Thread(None, test_thread, None,
-                                      (test, node, barrier, res_q))
-                threads.append(th)
-                th.daemon = True
-                th.start()
+        threads = []
+        barrier = utils.Barrier(len(test_nodes))
+        for node in test_nodes:
+            msg = "Starting {0} test on {1} node"
+            logger.debug(msg.format(name, node.conn_url))
+            test = tool_type_mapper[name](params, res_q.put)
+            th = threading.Thread(None, test_thread, None,
+                                  (test, node, barrier, res_q))
+            threads.append(th)
+            th.daemon = True
+            th.start()
 
-            def gather_results(res_q, results):
-                while not res_q.empty():
-                    val = res_q.get()
+        def gather_results(res_q, results):
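+            # drain the queue: worker threads put either a result or an exception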
+            while not res_q.empty():
+                val = res_q.get()
 
-                    if isinstance(val, Exception):
-                        msg = "Exception during test execution: {0}"
-                        raise ValueError(msg.format(val.message))
+                if isinstance(val, Exception):
+                    msg = "Exception during test execution: {0}"
+                    raise ValueError(msg.format(val.message))
 
-                    results.append(val)
+                results.append(val)
 
-            results = []
+        results = []
 
-            while True:
-                for th in threads:
-                    th.join(1)
-                    gather_results(res_q, results)
+        while True:
+            for th in threads:
+                th.join(1)
+                gather_results(res_q, results)
 
-                if all(not th.is_alive() for th in threads):
-                    break
+            if all(not th.is_alive() for th in threads):
+                break
 
-            gather_results(res_q, results)
-            yield name, test.merge_results(results)
-
-
-def parse_args(argv):
-    parser = argparse.ArgumentParser(
-        description="Run disk io performance test")
-
-    parser.add_argument("-l", dest='extra_logs',
-                        action='store_true', default=False,
-                        help="print some extra log info")
-
-    parser.add_argument("-b", '--build_description',
-                        type=str, default="Build info")
-    parser.add_argument("-i", '--build_id', type=str, default="id")
-    parser.add_argument("-t", '--build_type', type=str, default="GA")
-    parser.add_argument("-u", '--username', type=str, default="admin")
-    parser.add_argument("-p", '--post-process-only', default=None)
-    parser.add_argument("-o", '--output-dest', nargs="*")
-    parser.add_argument("config_file", nargs="?", default="config.yaml")
-
-    return parser.parse_args(argv[1:])
+        gather_results(res_q, results)
+        yield name, test.merge_results(results)
 
 
 def log_nodes_statistic(_, ctx):
@@ -223,10 +209,6 @@
         logger.debug("Found {0} nodes with role {1}".format(count, role))
 
 
-def log_sensors_config(cfg):
-    pass
-
-
 def connect_stage(cfg, ctx):
     ctx.clear_calls_stack.append(disconnect_stage)
     connect_all(ctx.nodes)
@@ -235,7 +217,7 @@
 def discover_stage(cfg, ctx):
     if cfg.get('discover') is not None:
         discover_objs = [i.strip() for i in cfg['discover'].strip().split(",")]
-        ctx.nodes.extend(discover.discover(discover_objs, cfg['clouds']))
+        ctx.nodes.extend(discover.discover(ctx, discover_objs, cfg['clouds']))
 
     for url, roles in cfg.get('explicit_nodes', {}).items():
         ctx.nodes.append(Node(url, roles.split(",")))
@@ -258,8 +240,6 @@
             if role in node.roles:
                 sens_cfg.append((node.connection, collect_cfg))
 
-    log_sensors_config(sens_cfg)
-
     ctx.sensor_cm = start_monitoring(cfg["receiver_uri"], None,
                                      connected_config=sens_cfg)
 
@@ -278,53 +258,84 @@
     ctx.sensor_data = ctx.sensors_control_queue.get()
 
 
-def run_all_test(cfg, ctx):
-    ctx.results = []
+def get_os_credentials(cfg, ctx, creds_type):
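+    # resolve openstack credentials from one of three sources: the 'clouds'
+    # config section, the process environment ('ENV'), or a creds file path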
+    creds = None
 
-    if 'start_test_nodes' in cfg['tests']:
-        params = cfg['tests']['start_test_nodes']['openstack']
-        os_nodes_ids = []
-
-        os_creds = params['creds']
-
-        if os_creds == 'fuel':
-            raise NotImplementedError()
-
-        elif os_creds == 'clouds':
+    if creds_type == 'clouds':
+        if 'openstack' in cfg['clouds']:
             os_cfg = cfg['clouds']['openstack']
+
             tenant = os_cfg['OS_TENANT_NAME'].strip()
             user = os_cfg['OS_USERNAME'].strip()
             passwd = os_cfg['OS_PASSWORD'].strip()
             auth_url = os_cfg['OS_AUTH_URL'].strip()
 
-        elif os_creds == 'ENV':
-            tenant = None
-            user = None
-            passwd = None
-            auth_url = None
+        elif 'fuel' in cfg['clouds'] and \
+             'openstack_env' in cfg['clouds']['fuel']:
+            creds = ctx.fuel_openstack_creds
 
-        else:
-            raise ValueError("Only 'ENV' creds are supported")
+    elif creds_type == 'ENV':
+        user, passwd, tenant, auth_url = start_vms.ostack_get_creds()
+    elif os.path.isfile(creds_type):
+        user, passwd, tenant, auth_url = start_vms.ostack_get_creds()
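+        # NOTE: parsing the creds file isn't implemented yet; this branch
+        # currently falls back to the environment as well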
+    else:
+        msg = "Creds {0!r} isn't supported".format(creds_type)
+        raise ValueError(msg)
 
-        start_vms.nova_connect(user, passwd, tenant, auth_url)
+    if creds is None:
+        creds = {'name': user,
+                 'passwd': passwd,
+                 'tenant': tenant,
+                 'auth_url': auth_url}
 
-        logger.info("Preparing openstack")
-        start_vms.prepare_os(user, passwd, tenant, auth_url)
+    return creds
 
-        new_nodes = []
-        for new_node, node_id in start_vms.launch_vms(params):
-            new_node.roles.append('testnode')
-            ctx.nodes.append(new_node)
-            os_nodes_ids.append(node_id)
-            new_nodes.append(new_node)
 
-        store_nodes_in_log(cfg, os_nodes_ids)
-        ctx.openstack_nodes_ids = os_nodes_ids
+def run_tests_stage(cfg, ctx):
+    ctx.results = []
 
-        connect_all(new_nodes)
+    if 'tests' not in cfg:
+        return
 
-    if 'tests' in cfg:
-        ctx.results.extend(run_tests(cfg['tests'], ctx.nodes))
+    for group in cfg['tests']:
+
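+        # every entry in cfg['tests'] is a single-key mapping: {name: config}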
+        assert len(group.items()) == 1
+        key, config = group.items()[0]
+
+        if 'start_test_nodes' == key:
+            params = config['vm_params']
+            os_nodes_ids = []
+
+            os_creds_type = config['creds']
+            os_creds = get_os_credentials(cfg, ctx, os_creds_type)
+
+            start_vms.nova_connect(**os_creds)
+
+            # logger.info("Preparing openstack")
+            # start_vms.prepare_os(**os_creds)
+
+            new_nodes = []
+            try:
+                for new_node, node_id in start_vms.launch_vms(params):
+                    new_node.roles.append('testnode')
+                    ctx.nodes.append(new_node)
+                    os_nodes_ids.append(node_id)
+                    new_nodes.append(new_node)
+
+                store_nodes_in_log(cfg, os_nodes_ids)
+                ctx.openstack_nodes_ids = os_nodes_ids
+
+                connect_all(new_nodes)
+
+                for test_group in config.get('tests', []):
+                    ctx.results.extend(run_tests(test_group, ctx.nodes))
+
+            finally:
+                # shut_down_vms_stage(cfg, ctx)
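+                # (teardown disabled for now, presumably to keep the VMs
+                # around for inspection after a run)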
+                pass
+
+        elif 'tests' in key:
+            ctx.results.extend(run_tests(config, ctx.nodes))
 
 
 def shut_down_vms_stage(cfg, ctx):
@@ -353,13 +364,6 @@
         shut_down_vms_stage(cfg, ctx)
 
 
-def run_tests_stage(cfg, ctx):
-    # clear nodes that possible were created on previous test running
-    # clear_enviroment(cfg, ctx) << fix OS connection
-    ctx.clear_calls_stack.append(shut_down_vms_stage)
-    run_all_test(cfg, ctx)
-
-
 def disconnect_stage(cfg, ctx):
     ssh_utils.close_all_sessions()
 
@@ -433,11 +437,30 @@
 def load_data_from(var_dir):
     def load_data_from_file(cfg, ctx):
         raw_results = os.path.join(var_dir, 'raw_results.yaml')
-        print "load data from", raw_results
         ctx.results = yaml.load(open(raw_results).read())
     return load_data_from_file
 
 
+def parse_args(argv):
+    parser = argparse.ArgumentParser(
+        description="Run disk io performance test")
+
+    parser.add_argument("-l", dest='extra_logs',
+                        action='store_true', default=False,
+                        help="print some extra log info")
+
+    parser.add_argument("-b", '--build_description',
+                        type=str, default="Build info")
+    parser.add_argument("-i", '--build_id', type=str, default="id")
+    parser.add_argument("-t", '--build_type', type=str, default="GA")
+    parser.add_argument("-u", '--username', type=str, default="admin")
+    parser.add_argument("-p", '--post-process-only', default=None)
+    parser.add_argument("-o", '--output-dest', nargs="*")
+    parser.add_argument("config_file", nargs="?", default="config.yaml")
+
+    return parser.parse_args(argv[1:])
+
+
 def main(argv):
     opts = parse_args(argv)
 
@@ -459,7 +482,7 @@
             report_stage
         ]
 
-    load_config(opts.config_file)
+    load_config(opts.config_file, opts.post_process_only)
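+    # with --post-process-only set, that directory is reused as var_dir
+    # instead of generating a fresh one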
 
     level = logging.DEBUG if opts.extra_logs else logging.WARNING
     setup_logger(logger, level, cfg_dict['log_file'])
@@ -489,7 +512,7 @@
         if exc is not None:
             raise exc, cls, tb
 
-    logger.info("All info stotored into {0}".format(cfg_dict['var_dir']))
+    logger.info("All info stored into {0}".format(cfg_dict['var_dir']))
     return 0
 
 
diff --git a/scripts/connector.py b/scripts/connector.py
index 3cf40cc..6f0f744 100644
--- a/scripts/connector.py
+++ b/scripts/connector.py
@@ -1,71 +1,33 @@
-import argparse
-import logging
-import sys
-import tempfile
 import os
+import sys
+import logging
+import argparse
+import tempfile
 import paramiko
 
-from urlparse import urlparse
+import fuel_rest_api
 from nodes.node import Node
-from ssh_utils import ssh_connect, ssh_copy_file, connect
 from utils import parse_creds
-from fuel_rest_api import KeystoneAuth
+from urlparse import urlparse
+
 
 tmp_file = tempfile.NamedTemporaryFile().name
 openrc_path = tempfile.NamedTemporaryFile().name
 logger = logging.getLogger("io-perf-tool")
 
 
-def get_cluster_id(cluster_name, conn):
-    clusters = conn.do("get", path="/api/clusters")
-    for cluster in clusters:
-        if cluster['name'] == cluster_name:
-            return cluster['id']
-
-
-def get_openrc_data(file_name):
-    openrc_dict = {}
-
-    with open(file_name) as f:
-        for line in f.readlines():
-            if len(line.split(" ")) > 1:
-                line = line.split(' ')[1]
-                key, value = line.split('=')
-
-                if key in ['OS_AUTH_URL', 'OS_PASSWORD',
-                              'OS_TENANT_NAME', 'OS_USERNAME']:
-                    openrc_dict[key] = value[1: len(value) - 2]
-
-    return openrc_dict
-
-
-def get_openrc(nodes):
-    controller = None
-
-    for node in nodes:
-        if 'controller' in node.roles:
-            controller = node
-            break
-
-    url = controller.conn_url[6:]
-    ssh = connect(url)
-    sftp = ssh.open_sftp()
-    sftp.get('/root/openrc', openrc_path)
-    sftp.close()
-
-    return get_openrc_data(openrc_path)
-
-
 def discover_fuel_nodes(fuel_url, creds, cluster_name):
     username, tenant_name, password = parse_creds(creds)
     creds = {"username": username,
              "tenant_name": tenant_name,
              "password": password}
 
-    fuel = KeystoneAuth(fuel_url, creds, headers=None, echo=None,)
-    cluster_id = get_cluster_id(cluster_name, fuel)
-    nodes = fuel.do("get", path="/api/nodes?cluster_id=" + str(cluster_id))
-    ips = [node["ip"] for node in nodes]
+    conn = fuel_rest_api.KeystoneAuth(fuel_url, creds, headers=None)
+    cluster_id = fuel_rest_api.get_cluster_id(conn, cluster_name)
+    cluster = fuel_rest_api.reflect_cluster(conn, cluster_id)
+
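+    # take each node's address on the fuel 'admin' network for further access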
+    nodes = list(cluster.get_nodes())
+    ips = [node.get_ip('admin') for node in nodes]
     roles = [node["roles"] for node in nodes]
 
     host = urlparse(fuel_url).hostname
@@ -73,8 +35,9 @@
     nodes, to_clean = run_agent(ips, roles, host, tmp_file)
     nodes = [Node(node[0], node[1]) for node in nodes]
 
-    openrc_dict = get_openrc(nodes)
+    openrc_dict = cluster.get_openrc()
 
+    logger.debug("Found %s fuel nodes for env %r" % (len(nodes), cluster_name))
     return nodes, to_clean, openrc_dict
 
 
@@ -144,9 +107,8 @@
                       + ":" + fuel_id_rsa_path, role))
 
     ssh.close()
-    logger.info('Files has been transferred successfully to Fuel node, ' \
+    logger.info('Files have been transferred successfully to the Fuel node, ' +
                 'agent has been launched')
-    logger.info("Nodes : " + str(nodes))
 
     return nodes, nodes_to_clean
 
@@ -170,7 +132,8 @@
     args = parse_command_line(argv)
 
     nodes, to_clean, _ = discover_fuel_nodes(args.fuel_url,
-                                          args.creds, args.cluster_name)
+                                             args.creds,
+                                             args.cluster_name)
     discover_fuel_nodes_clean(args.fuel_url, {"username": "root",
                                               "password": "test37",
                                               "port": 22}, to_clean)
diff --git a/start_vms.py b/start_vms.py
index f7d097c..93162a3 100644
--- a/start_vms.py
+++ b/start_vms.py
@@ -145,9 +145,9 @@
     return [ip for ip in ip_list if ip.instance_id is None][:amount]
 
 
-def launch_vms(config):
+def launch_vms(params):
     logger.debug("Starting new nodes on openstack")
-    params = config['vm_params'].copy()
+    params = params.copy()
     count = params.pop('count')
 
     if isinstance(count, basestring):
@@ -156,9 +156,9 @@
         srv_count = len([srv for srv in lst if srv.status == 'enabled'])
         count = srv_count * int(count[1:])
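+        # a count string such as "x2" (assumed format) means that many VMs
+        # per enabled service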
 
+    srv_params = "img: {img_name}, flavor: {flavor_name}".format(**params)
     msg_templ = "Will start {0} servers with next params: {1}"
-    logger.debug(msg_templ.format(count, ""))
-    # vm_creds = config['vm_params']['creds'] ?????
+    logger.info(msg_templ.format(count, srv_params))
     vm_creds = params.pop('creds')
 
     for ip, os_node in create_vms_mt(NOVA_CONNECTION, count, **params):
@@ -296,25 +296,3 @@
                         cinder.volumes.delete(vol)
 
     logger.debug("Clearing done (yet some volumes may still deleting)")
-
-
-# def prepare_host(key_file, ip, fio_path, dst_fio_path, user='cirros'):
-#     print "Wait till ssh ready...."
-#     wait_ssh_ready(ip, user, key_file)
-
-#     print "Preparing host >"
-#     print "    Coping fio"
-#     copy_fio(key_file, ip, fio_path, user, dst_fio_path)
-
-#     key_opts = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-#     args = (key_file, user, ip, key_opts)
-#     cmd_format = "ssh {3} -i {0} {1}@{2} '{{0}}'".format(*args).format
-
-#     def exec_on_host(cmd):
-#         print "    " + cmd
-#         subprocess.check_call(cmd_format(cmd), shell=True)
-
-#     exec_on_host("sudo /usr/sbin/mkfs.ext4 /dev/vdb")
-#     exec_on_host("sudo /bin/mkdir /media/ceph")
-#     exec_on_host("sudo /bin/mount /dev/vdb /media/ceph")
-#     exec_on_host("sudo /bin/chmod a+rwx /media/ceph")
diff --git a/tests/itest.py b/tests/itest.py
index 1dbbe13..ad76b9e 100644
--- a/tests/itest.py
+++ b/tests/itest.py
@@ -122,7 +122,7 @@
         self.files_to_copy = {local_fname: self.io_py_remote}
         copy_paths(conn, self.files_to_copy)
 
-        cmd_templ = "dd if=/dev/zero of={0} bs={1} count={2}"
+        cmd_templ = "sudo dd if=/dev/zero of={0} bs={1} count={2}"
         files = {}
 
         for secname, params in self.configs:
@@ -139,7 +139,7 @@
             run_over_ssh(conn, cmd, timeout=msz)
 
     def run(self, conn, barrier):
-        cmd_templ = "env python2 {0} --type {1} {2} --json -"
+        cmd_templ = "sudo env python2 {0} --type {1} {2} --json -"
 
         params = " ".join("{0}={1}".format(k, v)
                           for k, v in self.config_params.items())
@@ -159,7 +159,7 @@
 
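+            # 10% headroom plus a flat 300s over the estimated run time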
             out_err = run_over_ssh(conn, cmd,
                                    stdin_data=self.raw_cfg,
-                                   timeout=int(exec_time * 1.1))
+                                   timeout=int(exec_time * 1.1 + 300))
         finally:
             barrier.exit()