wip: tune fio job configs, rework prefill check and test summaries, adjust latency reporting
diff --git a/scripts/prepare.sh b/scripts/prepare.sh
index c9be2f5..e8813f1 100644
--- a/scripts/prepare.sh
+++ b/scripts/prepare.sh
@@ -97,7 +97,9 @@
 
         IMAGE_FILE="/tmp/${IMAGE_NAME}.qcow"
         if [ ! -f "$IMAGE_FILE" ] ; then
-            curl "$IMAGE_URL" -o "$IMAGE_FILE"
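+            # -sS: hide curl's progress meter (it is written to stderr, so
+            # redirecting stdout alone does not silence it) but keep errors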
+            curl -sS "$IMAGE_URL" -o "$IMAGE_FILE"
         fi
         opts="--disk-format qcow2 --container-format bare --is-public true"
         glance image-create --name "$IMAGE_NAME" $opts --file "$IMAGE_FILE" >/dev/null
diff --git a/wally/report.py b/wally/report.py
index 16c487d..cb871b3 100644
--- a/wally/report.py
+++ b/wally/report.py
@@ -277,10 +277,11 @@
 
     for res in processed_results.values():
         if res.name.startswith('lat_vs_iops'):
-            lat_iops[res.concurence].append((res.lat.average / 1000.0,
-                                             res.lat.deviation / 1000.0,
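+            # res.lat is used as-is; per-sample deviation is not tracked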
+            lat_iops[res.concurence].append((res.lat,
+                                             0,
                                              res.iops.average,
                                              res.iops.deviation))
             requested_iops = res.p.rate_iops * res.concurence
             requsted_vs_real[res.concurence][requested_iops] = \
                 (res.iops.average, res.iops.deviation)
@@ -344,7 +348,9 @@
 def io_chart(title, concurence,
              latv, latv_min, latv_max,
              iops_or_bw, iops_or_bw_err,
-             legend, log=False,
+             legend,
+             log_iops=False,
+             log_lat=False,
              boxplots=False,
              latv_50=None, latv_95=None):
     points = " MiBps" if legend == 'BW' else ""
@@ -388,9 +394,12 @@
     plt.legend(handles1 + handles2, labels1 + labels2,
                loc='center left', bbox_to_anchor=(1.1, 0.81))
 
-    if log:
+    if log_iops:
         p1.set_yscale('log')
+
+    if log_lat:
         p2.set_yscale('log')
+
     plt.subplots_adjust(right=0.68)
 
     return get_emb_data_svg(plt)
@@ -420,9 +429,15 @@
         lat = None
         lat_min = None
         lat_max = None
+
         lat_50 = [x.lat_50 for x in chart_data]
         lat_95 = [x.lat_95 for x in chart_data]
 
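+        # switch the latency axis to log scale when the 95th percentile
+        # is more than an order of magnitude above the median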
+        lat_diff_max = max(x.lat_95 / x.lat_50 for x in chart_data)
+        lat_log_scale = (lat_diff_max > 10)
+
         testnodes_count = x.testnodes_count
         concurence = [(testnodes_count, x.concurence)
                       for x in chart_data]
@@ -438,7 +451,7 @@
 
         fc = io_chart(title=desc,
                       concurence=concurence,
-                      
+
                       latv=lat,
                       latv_min=lat_min,
                       latv_max=lat_max,
@@ -447,6 +460,7 @@
                       iops_or_bw_err=data_dev,
 
                       legend=name,
+                      log_lat=lat_log_scale,
 
                       latv_50=lat_50,
                       latv_95=lat_95)
@@ -510,7 +524,7 @@
             if res.p.rw != 'randwrite':
                 continue
             rws4k_iops_lat_th.append((res.iops.average,
-                                      res.lat.average,
+                                      res.lat,
                                       res.concurence))
 
     rws4k_iops_lat_th.sort(key=lambda (_1, _2, conc): conc)
@@ -629,8 +644,9 @@
             if res.name.startswith('mixed-ssd'):
                 is_ssd = True
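+            # res.lat is used as-is; deviation is not tracked (set to 0)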
             mixed[res.concurence].append((res.p.rwmixread,
-                                          res.lat.average / 1000.0,
-                                          res.lat.deviation / 1000.0,
+                                          res.lat,
+                                          0,
                                           res.iops.average,
                                           res.iops.deviation))
 
diff --git a/wally/suits/io/ceph.cfg b/wally/suits/io/ceph.cfg
index bf410bb..330b1bd 100644
--- a/wally/suits/io/ceph.cfg
+++ b/wally/suits/io/ceph.cfg
@@ -1,12 +1,13 @@
 [global]
 include defaults.cfg
 
-NUMJOBS={% 1, 5, 10, 15, 40 %}
-# NUMJOBS_SHORT={% 1, 2, 3, 10 %}
-NUMJOBS_SHORT=1
+NUMJOBS_R={% 1, 5, 10, 15, 25, 40 %}
+NUMJOBS_W={% 1, 3, 5, 7, 10 %}
+NUMJOBS_SEQ_OPS={% 1, 2, 3, 10 %}
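+# each {% a, b, ... %} list expands into a separate fio job per value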
 
 ramp_time=15
-runtime=120
+runtime=240
 
 # ---------------------------------------------------------------------
 # check different thread count, sync mode. (latency, iops) = func(th_count)
@@ -15,7 +15,17 @@
 blocksize=4k
 rw=randwrite
 sync=1
-numjobs={NUMJOBS}
+numjobs={NUMJOBS_W}
+
+# ---------------------------------------------------------------------
+# check different thread count, direct read mode. (latency, iops) = func(th_count)
+# also check iops for randread
+# ---------------------------------------------------------------------
+[ceph_{TEST_SUMM}]
+blocksize=4k
+rw=randread
+direct=1
+numjobs={NUMJOBS_R}
 
 # ---------------------------------------------------------------------
 # direct write
@@ -27,16 +37,6 @@
 numjobs=1
 
 # ---------------------------------------------------------------------
-# check different thread count, direct read mode. (latency, iops) = func(th_count)
-# also check iops for randread
-# ---------------------------------------------------------------------
-[ceph_{TEST_SUMM}]
-blocksize=4k
-rw=randread
-direct=1
-numjobs={NUMJOBS}
-
-# ---------------------------------------------------------------------
 # this is essentially sequential write/read operations
 # we can't use sequential with numjobs > 1 due to caching and block merging
 # ---------------------------------------------------------------------
@@ -44,5 +44,5 @@
 blocksize=16m
 rw={% randread, randwrite %}
 direct=1
-numjobs={NUMJOBS_SHORT}
+numjobs={NUMJOBS_SEQ_OPS}
 
diff --git a/wally/suits/io/cinder_iscsi.cfg b/wally/suits/io/cinder_iscsi.cfg
index 4d19dd9..fd67090 100644
--- a/wally/suits/io/cinder_iscsi.cfg
+++ b/wally/suits/io/cinder_iscsi.cfg
@@ -1,13 +1,11 @@
 [global]
 include defaults.cfg
 
-# NUMJOBS={% 1, 5, 10, 15, 20, 30, 40, 80 %}
+NUMJOBS={% 1, 5, 10, 15, 25, 40 %}
 
-NUMJOBS={% 1, 3, 5, 10, 20, 40 %}
-
+ramp_time=15
+runtime=240
 direct=1
-ramp_time=5
-runtime=30
 
 # ---------------------------------------------------------------------
 # check different thread count, sync mode. (latency, iops) = func(th_count)
@@ -31,26 +29,22 @@
 # check IOPS randwrite.
 # ---------------------------------------------------------------------
 [cinder_iscsi_{TEST_SUMM}]
-blocksize=64k
+blocksize=4k
 rw=randwrite
-ramp_time=180
-runtime=120
 
 # ---------------------------------------------------------------------
 # No reason for th count > 1 in case of sequential operations,
 # or they become random
 # ---------------------------------------------------------------------
 [cinder_iscsi_{TEST_SUMM}]
-blocksize=1m
-rw={% read,write %}
-offset={UNIQ_OFFSET}
-ramp_time=90
-runtime=30
+blocksize=16m
+rw={% randread,randwrite %}
+numjobs={% 1, 2, 3, 10 %}
 
 # [cinder_iscsi_{TEST_SUMM}]
-# blocksize=64m
-# rw={% randread,randwrite %}
-# direct=1
-# ramp_time=30
+# blocksize=1m
+# rw={% read,write %}
+# offset={UNIQ_OFFSET}
+# ramp_time=90
 # runtime=30
 #
diff --git a/wally/suits/io/fio.py b/wally/suits/io/fio.py
index b85bbb1..a57faff 100644
--- a/wally/suits/io/fio.py
+++ b/wally/suits/io/fio.py
@@ -1,6 +1,7 @@
 import re
 import time
 import json
+import stat
 import random
 import os.path
 import logging
@@ -22,7 +23,9 @@
 from wally.ssh_utils import (save_to_remote, read_from_remote, BGSSHTask, reconnect)
 
 from .fio_task_parser import (execution_time, fio_cfg_compile,
-                              get_test_summary, get_test_sync_mode, FioJobSection)
+                              get_test_summary, get_test_summary_tuple,
+                              get_test_sync_mode, FioJobSection)
+
 from ..itest import (TimeSeriesValue, PerfTest, TestResults,
                      run_on_node, TestConfig, MeasurementMatrix)
 
@@ -228,8 +237,10 @@
                 'flt_bw': flt_bw}
 
     def summary(self):
-        return get_test_summary(self.fio_task) + "vm" \
-               + str(len(self.config.nodes))
+        return get_test_summary(self.fio_task, len(self.config.nodes))
+
+    def summary_tpl(self):
+        return get_test_summary_tuple(self.fio_task, len(self.config.nodes))
 
     def get_yamable(self):
         return self.summary()
@@ -309,8 +320,6 @@
         pinfo.raw_bw = map(prepare, self.bw.per_vm())
         pinfo.raw_iops = map(prepare, self.iops.per_vm())
 
-        iops_per_th = sum(sum(pinfo.raw_iops, []), [])
-
         fparams = self.get_params_from_fio_report()
         fio_report_bw = sum(fparams['flt_bw'])
         fio_report_iops = sum(fparams['flt_iops'])
@@ -333,17 +342,24 @@
 
         # When IOPS/BW per thread is too low
         # data from logs is rounded to match
+        iops_per_th = sum(sum(pinfo.raw_iops, []), [])
         if average(iops_per_th) > 10:
-            pinfo.bw = bw_log
             pinfo.iops = iops_log
-            pinfo.bw2 = bw_report
             pinfo.iops2 = iops_report
         else:
-            pinfo.bw = bw_report
             pinfo.iops = iops_report
-            pinfo.bw2 = bw_log
             pinfo.iops2 = iops_log
 
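+        # same selection rule for bandwidth: prefer log-based data unless
+        # the per-thread values are too small and rounded away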
+        bw_per_th = sum(sum(pinfo.raw_bw, []), [])
+        if average(bw_per_th) > 10:
+            pinfo.bw = bw_log
+            pinfo.bw2 = bw_report
+        else:
+            pinfo.bw = bw_report
+            pinfo.bw2 = bw_log
+
         self._pinfo = pinfo
 
         return pinfo
@@ -388,13 +400,11 @@
         self.min_bw_per_thread = get("min_bw", None)
 
         self.use_sudo = get("use_sudo", True)
-        self.test_logging = get("test_logging", False)
 
         self.raw_cfg = open(self.config_fname).read()
         self.fio_configs = fio_cfg_compile(self.raw_cfg,
                                            self.config_fname,
-                                           self.config_params,
-                                           split_on_names=self.test_logging)
+                                           self.config_params)
         self.fio_configs = list(self.fio_configs)
 
     @classmethod
@@ -409,29 +419,42 @@
         # Need to remove tempo files, used for testing
         pass
 
+    # size is in megabytes
     def check_prefill_required(self, rossh, fname, size, num_blocks=16):
-        try:
-            data = rossh("ls -l " + fname, nolog=True)
-        except:
+        with rossh.connection.open_sftp() as sftp:
+            try:
+                fstats = sftp.stat(fname)
+            except IOError:
+                # file does not exist yet - prefill is required
+                return True
+
+        if stat.S_ISREG(fstats.st_mode) and fstats.st_size < size * 1024 ** 2:
             return True
 
-        sz = data.split()[4]
-        if int(sz) / (1024 ** 2) < size:
-            return True
-
-        cmd = """python -c "import sys; fd = open('{0}', 'rb');""" + \
-              """fd.seek({1}); sys.stdout.write(fd.read(1024))" | md5sum"""
+        cmd = 'python -c "' + \
+              "import sys;" + \
+              "fd = open('{0}', 'rb');" + \
+              "fd.seek({1});" + \
+              "data = fd.read(1024); " + \
+              "sys.stdout.write(data + ' ' * ( 1024 - len(data)))\" | md5sum"
 
         if self.use_sudo:
             cmd = "sudo " + cmd
 
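+        # md5 of 1024 zero bytes; a sampled block hashing to this value
+        # was never written, so the file still needs prefill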
         zero_md5 = '0f343b0931126a20f133d67c2b018a3b'
-        offsets = [random.randrange(size * 1024) for _ in range(num_blocks)]
-        offsets.append(size * 1024 - 1024)
+        offsets = [random.randrange(size * 1024 ** 2 - 1024) for _ in range(num_blocks)]
+        offsets.append(size * 1024 ** 2 - 1024)
 
         for offset in offsets:
             data = rossh(cmd.format(fname, offset), nolog=True)
-            md = data.split()[0].strip()
+
+            md = ""
+            for line in data.split("\n"):
+                if "unable to resolve" not in line:
+                    md = line.split()[0].strip()
+                    break
 
             if len(md) != 32:
                 logger.error("File data check is failed - " + data)
@@ -463,7 +480,7 @@
         for fname, curr_sz in files.items():
             if not force:
                 if not self.check_prefill_required(rossh, fname, curr_sz):
-                    print "prefill is skipped"
+                    logger.debug("prefill is skipped")
                     continue
 
             logger.info("Prefilling file {0}".format(fname))
@@ -555,6 +572,9 @@
     def pre_run_th(self, node, files, force):
         # fill files with pseudo-random data
         rossh = run_on_node(node)
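+        # expose the raw SSH connection so that check_prefill_required
+        # can open an SFTP session over it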
+        rossh.connection = node.connection
 
         try:
             cmd = 'mkdir -p "{0}"'.format(self.config.remote_dir)
@@ -839,23 +857,15 @@
         for console
         """
 
-        def getconc(data):
-            th_count = data.params['vals'].get('numjobs')
-
-            if th_count is None:
-                th_count = data.params['vals'].get('concurence', 1)
-            return th_count
-
         def key_func(data):
-            p = data.params['vals']
-
-            th_count = getconc(data)
-
+            tpl = data.summary_tpl()
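+            # sort key: test name, operation, sync mode, block size and
+            # total thread count across all VMs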
             return (data.name.rsplit("_", 1)[0],
-                    p['rw'],
-                    get_test_sync_mode(data.params['vals']),
-                    ssize2b(p['blocksize']),
-                    int(th_count) * len(data.config.nodes))
+                    tpl.oper,
+                    tpl.mode,
+                    tpl.bsize,
+                    int(tpl.th_count) * int(tpl.vm_count))
         res = []
 
         for item in sorted(results, key=key_func):
@@ -878,13 +886,15 @@
             iops = round_3_digit(iops)
             bw = round_3_digit(bw)
 
-            res.append({"name": item.name.rsplit('_', 1)[0],
-                        "key": key_func(item),
-                        "summ": item.summary()[3:],
+            summ = "{0.oper}{0.mode} {0.bsize:>4} {0.th_count:>3}th {0.vm_count:>2}vm".format(item.summary_tpl())
+
+            res.append({"name": key_func(item)[0],
+                        "key": key_func(item)[:4],
+                        "summ": summ,
                         "iops": int(iops),
                         "bw": int(bw),
-                        "iops_conf": str(conf_perc),
-                        "iops_dev": str(dev_perc),
+                        "conf": str(conf_perc),
+                        "dev": str(dev_perc),
                         "iops_per_vm": int(iops_per_vm),
                         "bw_per_vm": int(bw_per_vm),
                         "lat_50": lat_50,
@@ -895,13 +905,13 @@
     Field = collections.namedtuple("Field", ("header", "attr", "allign", "size"))
     fiels_and_header = [
         Field("Name",           "name",        "l",  7),
-        Field("Description",    "summ",        "l", 10),
+        Field("Description",    "summ",        "l", 19),
         Field("IOPS\ncum",      "iops",        "r",  3),
-        Field("KiBps\ncum",     "bw",          "r",  3),
-        Field("Cnf %\n95%",     "iops_conf",   "r",  3),
-        Field("Dev%",           "iops_dev",    "r",  3),
-        Field("iops\nper vm",   "iops_per_vm", "r",  3),
-        Field("KiBps\nper vm",  "bw_per_vm",   "r",  3),
+        Field("KiBps\ncum",     "bw",          "r",  6),
+        Field("Cnf %\n95%",     "conf",        "r",  3),
+        Field("Dev%",           "dev",         "r",  3),
+        Field("iops\n/vm",      "iops_per_vm", "r",  3),
+        Field("KiBps\n/vm",     "bw_per_vm",   "r",  6),
         Field("lat ms\nmedian", "lat_50",      "r",  3),
         Field("lat ms\n95%",    "lat_95",      "r",  3)
     ]
@@ -923,12 +933,11 @@
 
         prev_k = None
         for item in cls.prepare_data(results):
-            curr_k = item['summ'][:4]
             if prev_k is not None:
-                if prev_k != curr_k:
+                if prev_k != item["key"]:
                     tab.add_row(sep)
 
-            prev_k = curr_k
+            prev_k = item["key"]
             tab.add_row([item[f.attr] for f in cls.fiels_and_header])
 
         return tab.draw()
@@ -978,14 +987,13 @@
             ))
 
         prev_k = None
-        iops_frmt = "{0[iops]} ~ {0[iops_conf]:>2} ~ {0[iops_dev]:>2}"
+        iops_frmt = "{0[iops]} ~ {0[conf]:>2} ~ {0[dev]:>2}"
         for item in processed_results[0]:
-            curr_k = item['summ'][:4]
             if prev_k is not None:
-                if prev_k != curr_k:
+                if prev_k != item["key"]:
                     tab.add_row(sep)
 
-            prev_k = curr_k
+            prev_k = item["key"]
 
             key = (item['name'], item['summ'])
             line = list(key)
@@ -1000,8 +1008,7 @@
                 elif base['iops'] == 0:
                     line.append("Nan")
                 else:
-                    prc_val = {'iops_dev': val['iops_dev'],
-                               'iops_conf': val['iops_conf']}
+                    prc_val = {'dev': val['dev'], 'conf': val['conf']}
                     prc_val['iops'] = int(100 * val['iops'] / base['iops'])
                     line.append(iops_frmt.format(prc_val))
 
diff --git a/wally/suits/io/fio_task_parser.py b/wally/suits/io/fio_task_parser.py
index c08bd37..9d19f3a 100644
--- a/wally/suits/io/fio_task_parser.py
+++ b/wally/suits/io/fio_task_parser.py
@@ -331,7 +331,10 @@
         return 'a'
 
 
-def get_test_summary(sec):
+TestSumm = namedtuple("TestSumm", ("oper", "mode", "bsize", "th_count", "vm_count"))
+
+
+def get_test_summary_tuple(sec, vm_count=None):
     if isinstance(sec, dict):
         vals = sec
     else:
@@ -351,10 +354,18 @@
     if th_count is None:
         th_count = vals.get('concurence', 1)
 
-    return "{0}{1}{2}th{3}".format(rw,
-                                   sync_mode,
-                                   vals['blocksize'],
-                                   th_count)
+    return TestSumm(rw, sync_mode, vals['blocksize'], th_count, vm_count)
+
+
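+# text form of the summary tuple: "<oper><mode><bsize>th<threads>[vm<count>]"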
+def get_test_summary(sec, vm_count=None):
+    tpl = get_test_summary_tuple(sec, vm_count)
+    res = "{0.oper}{0.mode}{0.bsize}th{0.th_count}".format(tpl)
+
+    if tpl.vm_count is not None:
+        res += "vm" + str(tpl.vm_count)
+
+    return res
 
 
 def execution_time(sec):
@@ -371,7 +381,7 @@
             yield res
 
 
-def fio_cfg_compile(source, fname, test_params, **slice_params):
+def fio_cfg_compile(source, fname, test_params):
     it = parse_all_in_1(source, fname)
     it = (apply_params(sec, test_params) for sec in it)
     it = flatmap(process_cycles, it)
@@ -382,9 +392,6 @@
 def parse_args(argv):
     parser = argparse.ArgumentParser(
         description="Run fio' and return result")
-    parser.add_argument("--runcycle", type=int, default=None,
-                        metavar="MAX_CYCLE_SECONDS",
-                        help="Max cycle length in seconds")
     parser.add_argument("-p", "--params", nargs="*", metavar="PARAM=VAL",
                         default=[],
                         help="Provide set of pairs PARAM=VAL to" +
@@ -408,12 +415,7 @@
         name, val = param_val.split("=", 1)
         params[name] = parse_value(val)
 
-    slice_params = {
-        'runcycle': argv_obj.runcycle,
-    }
-
-    sec_it = fio_cfg_compile(job_cfg, argv_obj.jobfile,
-                             params, **slice_params)
+    sec_it = fio_cfg_compile(job_cfg, argv_obj.jobfile, params)
 
     if argv_obj.action == 'estimate':
         print sec_to_str(sum(map(execution_time, sec_it)))
diff --git a/wally/suits/io/rrd.cfg b/wally/suits/io/rrd.cfg
index d5a92bb..3383dce 100644
--- a/wally/suits/io/rrd.cfg
+++ b/wally/suits/io/rrd.cfg
@@ -1,16 +1,14 @@
 [global]
 include defaults.cfg
 
-size={TEST_FILE_SIZE}
-
 # ---------------------------------------------------------------------
 [rws_{TEST_SUMM}]
 blocksize=4k
 rw=randwrite
 sync=1
-ramp_time=10
-runtime=60
-numjobs={% 20,40,60,80,100,120,140 %}
+ramp_time=15
+runtime=120
+numjobs={% 50,150 %}
 
 # ---------------------------------------------------------------------
 # [rws_{TEST_SUMM}]