Aggregate per-node fio results; harden pid-file handling and timeouts

* results_loader: sum the bw/iops series of all test nodes before taking
  median/deviation, so the reported numbers are cluster-wide; comment out
  the unused load_data/load_files helpers
* itest: also catch OSError around the pid file and ignore a pid file that
  vanishes before removal; add ~5% to the estimated execution time for
  sftp and other overhead; pass a separate soft timeout to
  wait_till_finished
* ceph.cfg: describe the 4k section as a direct write test
* formatter: whitespace only
diff --git a/wally/suits/io/ceph.cfg b/wally/suits/io/ceph.cfg
index 6abaae5..085153f 100644
--- a/wally/suits/io/ceph.cfg
+++ b/wally/suits/io/ceph.cfg
@@ -25,7 +25,7 @@
 numjobs={NUMJOBS}
 
 # ---------------------------------------------------------------------
-# check different thread count, sync mode. (latency, iops) = func(th_count)
+# direct write
 # ---------------------------------------------------------------------
 [ceph_test_{TEST_SUMM} * {NUM_ROUNDS}]
 blocksize=4k
diff --git a/wally/suits/io/formatter.py b/wally/suits/io/formatter.py
index a1da1c3..63c9408 100644
--- a/wally/suits/io/formatter.py
+++ b/wally/suits/io/formatter.py
@@ -24,9 +24,11 @@
     tab.set_cols_align(["l", "r", "r", "r", "r"])
 
     prev_k = None
+
     items = sorted(test_set['res'].items(), key=key_func)
 
     for test_name, data in items:
+
         curr_k = key_func((test_name, data))[:3]
 
         if prev_k is not None:
diff --git a/wally/suits/io/results_loader.py b/wally/suits/io/results_loader.py
index c67dbe8..3b50bf7 100644
--- a/wally/suits/io/results_loader.py
+++ b/wally/suits/io/results_loader.py
@@ -3,7 +3,7 @@
 import collections
 
 
-from wally.utils import ssize_to_b
+# from wally.utils import ssize_to_b
 from wally.statistic import med_dev
 
 PerfInfo = collections.namedtuple('PerfInfo',
@@ -12,16 +12,28 @@
                                    'lat', 'lat_dev', 'raw'))
 
 
+def split_and_add(data, block_count):
+    assert len(data) % block_count == 0
+    res = [0] * (len(data) // block_count)
+
+    for i in range(block_count):
+        for idx, val in enumerate(data[i::block_count]):
+            res[idx] += val
+
+    return res
+
+
 def process_disk_info(test_output):
     data = {}
-
     for tp, pre_result in test_output:
         if tp != 'io' or pre_result is None:
             pass
 
+        vm_count = pre_result['__test_meta__']['testnodes_count']
+
         for name, results in pre_result['res'].items():
-            bw, bw_dev = med_dev(results['bw'])
-            iops, iops_dev = med_dev(results['iops'])
+            bw, bw_dev = med_dev(split_and_add(results['bw'], vm_count))
+            iops, iops_dev = med_dev(split_and_add(results['iops'], vm_count))
             lat, lat_dev = med_dev(results['lat'])
             dev = bw_dev / float(bw)
             data[name] = PerfInfo(name, bw, iops, dev, lat, lat_dev, results)
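
The aggregation above treats the flat bw/iops list as vm_count interleaved
per-node series (sample 0 of node 0, sample 0 of node 1, ...) and sums them
element-wise, so med_dev now describes cluster-wide totals rather than a
single node. A minimal standalone check with made-up numbers:

    # Mirrors split_and_add from the hunk above; the sample data is hypothetical.
    def split_and_add(data, block_count):
        assert len(data) % block_count == 0
        res = [0] * (len(data) // block_count)

        for i in range(block_count):
            for idx, val in enumerate(data[i::block_count]):
                res[idx] += val

        return res

    bw = [100, 110, 95, 105, 102, 108]   # node0, node1, node0, node1, ...
    print(split_and_add(bw, 2))          # -> [210, 200, 210], per-sample totals
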
@@ -82,19 +94,19 @@
     return closure
 
 
-def load_data(raw_data):
-    data = list(parse_output(raw_data))[0]
+# def load_data(raw_data):
+#     data = list(parse_output(raw_data))[0]
 
-    for key, val in data['res'].items():
-        val['blocksize_b'] = ssize_to_b(val['blocksize'])
+#     for key, val in data['res'].items():
+#         val['blocksize_b'] = ssize_to_b(val['blocksize'])
 
-        val['iops_mediana'], val['iops_stddev'] = med_dev(val['iops'])
-        val['bw_mediana'], val['bw_stddev'] = med_dev(val['bw'])
-        val['lat_mediana'], val['lat_stddev'] = med_dev(val['lat'])
-        yield val
+#         val['iops_mediana'], val['iops_stddev'] = med_dev(val['iops'])
+#         val['bw_mediana'], val['bw_stddev'] = med_dev(val['bw'])
+#         val['lat_mediana'], val['lat_stddev'] = med_dev(val['lat'])
+#         yield val
 
 
-def load_files(*fnames):
-    for fname in fnames:
-        for i in load_data(open(fname).read()):
-            yield i
+# def load_files(*fnames):
+#     for fname in fnames:
+#         for i in load_data(open(fname).read()):
+#             yield i
diff --git a/wally/suits/itest.py b/wally/suits/itest.py
index 4b9db19..605df2c 100644
--- a/wally/suits/itest.py
+++ b/wally/suits/itest.py
@@ -301,13 +301,16 @@
                     try:
                         pid = read_from_remote(sftp, self.pid_file)
                         is_running = True
-                    except (NameError, IOError) as exc:
+                    except (NameError, IOError, OSError) as exc:
                         pid = None
                         is_running = False
 
                     if is_running:
                         if not self.check_process_is_running(sftp, pid):
-                            sftp.remove(self.pid_file)
+                            try:
+                                sftp.remove(self.pid_file)
+                            except (IOError, NameError, OSError):
+                                pass
                             is_running = False
 
             is_connected = True
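
Presumably the pid file can vanish between the existence check and
sftp.remove (the remote side cleans it up on exit), which is why the removal
now ignores IOError/OSError entirely. A stricter variant of the same idiom,
sketched here with the local os module for illustration only, would swallow
just a missing file and re-raise anything else:

    import errno
    import os

    def remove_if_exists(path):
        # Hypothetical helper, not part of this patch: ignore ENOENT,
        # re-raise permission or I/O problems.
        try:
            os.remove(path)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise
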
@@ -370,6 +373,10 @@
                 for test in self.fio_configs:
                     exec_time += io_agent.calculate_execution_time(test)
 
+                # +5% is a rough estimate for additional operations
+                # like sftp transfers, etc.
+                exec_time = int(exec_time * 1.05)
+
                 exec_time_s = sec_to_str(exec_time)
                 now_dt = datetime.datetime.now()
                 end_dt = now_dt + datetime.timedelta(0, exec_time)
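
The margin is applied before exec_time_s and end_dt are derived, so both the
logged estimate and the projected end time already include the overhead. The
arithmetic for a hypothetical one-hour estimate:

    import datetime

    exec_time = 3600                      # hypothetical fio estimate, seconds
    exec_time = int(exec_time * 1.05)     # +5% for sftp and setup -> 3780
    end_dt = datetime.datetime.now() + datetime.timedelta(0, exec_time)
    print(exec_time, end_dt.strftime("%Y-%m-%d %H:%M:%S"))
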
@@ -444,6 +451,7 @@
         exec_time_str = sec_to_str(exec_time)
 
         timeout = int(exec_time + max(300, exec_time))
+        soft_tout = exec_time
         barrier.wait()
         self.run_over_ssh(cmd, nolog=nolog)
 
@@ -467,7 +475,7 @@
         if self.node.connection is not Local:
             self.node.connection.close()
 
-        self.wait_till_finished(timeout)
+        self.wait_till_finished(soft_tout, timeout)
         if not nolog:
             logger.debug("Test on node {0} is finished".format(conn_id))
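
With this change wait_till_finished receives two limits: soft_tout, the
estimated run time, and timeout, the unchanged hard cap of
exec_time + max(300, exec_time). Its implementation is not part of this
diff, so only the relation between the two values is illustrated, again
with a hypothetical estimate:

    exec_time = 3600                                  # estimated run time, seconds
    soft_tout = exec_time                             # expected completion
    timeout = int(exec_time + max(300, exec_time))    # hard cap: 7200 here, never less than +300s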