working on reporting; this commit represents a broken, work-in-progress code state
diff --git a/wally/suits/all_suits.py b/wally/suits/all_suits.py
new file mode 100644
index 0000000..310b1f5
--- /dev/null
+++ b/wally/suits/all_suits.py
@@ -0,0 +1,8 @@
+from .io.fio import FioTest
+# from .itest import TestSuiteConfig
+# from .mysql import MysqlTest
+# from .omgbench import OmgTest
+# from .postgres import PgBenchTest
+
+
+all_suits = {suite.name: suite for suite in [FioTest]}
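
Editor's note: all_suits is a name-to-class registry used to pick the suite
implementation from the test type string in the run config (the removed code
below references the same suite_config.test_type field). A minimal usage
sketch; the start_suite() wrapper is hypothetical, only the constructor
signature matches PerfTest.__init__ in this commit:

    from wally.suits.all_suits import all_suits

    def start_suite(suite_config, storage):
        suite_cls = all_suits[suite_config.test_type]   # e.g. "fio" -> FioTest
        test = suite_cls(storage, suite_config)
        test.run()
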
diff --git a/wally/suits/io/defaults_qd.cfg b/wally/suits/io/defaults_qd.cfg
index fcf5c16..c3dee19 100644
--- a/wally/suits/io/defaults_qd.cfg
+++ b/wally/suits/io/defaults_qd.cfg
@@ -18,7 +18,6 @@
 filename={FILENAME}
 size={FILESIZE}
 
-write_iops_log=fio_iops_log
 write_bw_log=fio_bw_log
 log_avg_msec=1000
 write_hist_log=fio_lat_hist_log
diff --git a/wally/suits/io/fio.py b/wally/suits/io/fio.py
index 7b2c3e3..33e8343 100644
--- a/wally/suits/io/fio.py
+++ b/wally/suits/io/fio.py
@@ -1,14 +1,15 @@
 import array
 import os.path
 import logging
-from typing import cast, Any
+from typing import cast, Any, Tuple, List
 
 import wally
 
-from ...utils import StopTestError, get_os, ssize2b
+from ...utils import StopTestError, ssize2b, b2ssize
 from ...node_interfaces import IRPCNode
+from ...node_utils import get_os
 from ..itest import ThreadedTest
-from ...result_classes import TimeSeries, JobMetrics
+from ...result_classes import TimeSeries, DataSource, TestJobConfig
 from .fio_task_parser import execution_time, fio_cfg_compile, FioJobConfig, FioParams, get_log_files
 from . import rpc_plugin
 from .fio_hist import expected_lat_bins
@@ -17,7 +18,7 @@
 logger = logging.getLogger("wally")
 
 
-class IOPerfTest(ThreadedTest):
+class FioTest(ThreadedTest):
     soft_runcycle = 5 * 60
     retry_time = 30
     configs_dir = os.path.dirname(__file__)  # type: str
@@ -27,7 +28,7 @@
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)
 
-        get = self.config.params.get
+        get = self.suite.params.get
 
         self.remote_task_file = self.join_remote("task.fio")
         self.remote_output_file = self.join_remote("fio_result.json")
@@ -35,7 +36,7 @@
         self.use_sudo = get("use_sudo", True)  # type: bool
         self.force_prefill = get('force_prefill', False)  # type: bool
 
-        self.load_profile_name = self.config.params['load']  # type: str
+        self.load_profile_name = self.suite.params['load']  # type: str
 
         if os.path.isfile(self.load_profile_name):
             self.load_profile_path = self.load_profile_name   # type: str
@@ -47,16 +48,16 @@
         if self.use_system_fio:
             self.fio_path = "fio"    # type: str
         else:
-            self.fio_path = os.path.join(self.config.remote_dir, "fio")
+            self.fio_path = os.path.join(self.suite.remote_dir, "fio")
 
-        self.load_params = self.config.params['params']
+        self.load_params = self.suite.params['params']
         self.file_name = self.load_params['FILENAME']
 
         if 'FILESIZE' not in self.load_params:
             logger.debug("Getting test file sizes on all nodes")
             try:
-                sizes = {node.conn.fs.file_stat(self.file_name)['size']
-                         for node in self.config.nodes}
+                sizes = {node.conn.fs.file_stat(self.file_name)[b'size']
+                         for node in self.suite.nodes}
             except Exception:
                 logger.exception("FILESIZE is not set in config file and fail to detect it." +
                                  "Set FILESIZE or fix error and rerun test")
@@ -68,7 +69,7 @@
                 raise StopTestError()
 
             self.file_size = list(sizes)[0]
-            logger.info("Detected test file size is %s", self.file_size)
+            logger.info("Detected test file size is %sB", b2ssize(self.file_size))
             self.load_params['FILESIZE'] = self.file_size
         else:
             self.file_size = ssize2b(self.load_params['FILESIZE'])
@@ -80,31 +81,41 @@
             logger.error("Empty fio config provided")
             raise StopTestError()
 
-        self.exec_folder = self.config.remote_dir
+        self.exec_folder = self.suite.remote_dir
 
     def config_node(self, node: IRPCNode) -> None:
         plugin_code = open(rpc_plugin.__file__.rsplit(".", 1)[0] + ".py", "rb").read()  # type: bytes
         node.upload_plugin("fio", plugin_code)
 
         try:
-            node.conn.fs.rmtree(self.config.remote_dir)
+            node.conn.fs.rmtree(self.suite.remote_dir)
         except Exception:
             pass
 
         try:
-            node.conn.fs.makedirs(self.config.remote_dir)
+            node.conn.fs.makedirs(self.suite.remote_dir)
         except Exception:
-            msg = "Failed to recreate folder {} on remote {}.".format(self.config.remote_dir, node)
+            msg = "Failed to recreate folder {} on remote {}.".format(self.suite.remote_dir, node)
             logger.exception(msg)
             raise StopTestError()
 
+        # TODO: check this during config validation
+        if self.file_size % (4 * (1024 ** 2)) != 0:
+            logger.error("Test file size must be proportional to 4MiB")
+            raise StopTestError()
+
         self.install_utils(node)
 
         mb = int(self.file_size / 1024 ** 2)
-        logger.info("Filling test file %s with %sMiB of random data", self.file_name, mb)
-        fill_bw = node.conn.fio.fill_file(self.file_name, mb, force=self.force_prefill, fio_path=self.fio_path)
-        if fill_bw is not None:
-            logger.info("Initial fio fill bw is {} MiBps for {}".format(fill_bw, node))
+        logger.info("Filling test file %s on node %s with %sMiB of random data", self.file_name, node.info, mb)
+        is_prefilled, fill_bw = node.conn.fio.fill_file(self.file_name, mb,
+                                                        force=self.force_prefill,
+                                                        fio_path=self.fio_path)
+
+        if not is_prefilled:
+            logger.info("Test file on node %s is already prefilled", node.info)
+        elif fill_bw is not None:
+            logger.info("Initial fio fill bw is %s MiBps for %s", fill_bw, node.info)
 
     def install_utils(self, node: IRPCNode) -> None:
         os_info = get_os(node)
@@ -126,19 +137,19 @@
                 raise StopTestError()
 
             bz_dest = self.join_remote('fio.bz2')  # type: str
-            node.copy_file(fio_path, bz_dest)
+            node.copy_file(fio_path, bz_dest, compress=False)
             node.run("bzip2 --decompress {} ; chmod a+x {}".format(bz_dest, self.join_remote("fio")))
 
-    def get_expected_runtime(self, job_config: FioJobConfig) -> int:
+    def get_expected_runtime(self, job_config: TestJobConfig) -> int:
         return execution_time(cast(FioJobConfig, job_config))
 
-    def prepare_iteration(self, node: IRPCNode, job_config: FioJobConfig) -> None:
-        node.put_to_file(self.remote_task_file, str(job_config).encode("utf8"))
+    def prepare_iteration(self, node: IRPCNode, job: TestJobConfig) -> None:
+        node.put_to_file(self.remote_task_file, str(job).encode("utf8"))
 
     # TODO: get a link to substorage as a parameter
-    def run_iteration(self, node: IRPCNode, iter_config: FioJobConfig, job_root: str) -> JobMetrics:
-        f_iter_config = cast(FioJobConfig, iter_config)
-        exec_time = execution_time(f_iter_config)
+    def run_iteration(self, node: IRPCNode, job: TestJobConfig) -> List[TimeSeries]:
+        exec_time = execution_time(cast(FioJobConfig, job))
 
         fio_cmd_templ = "cd {exec_folder}; " + \
                         "{fio_path} --output-format=json --output={out_file} --alloc-size=262144 {job_file}"
@@ -152,20 +163,26 @@
         if must_be_empty:
             logger.error("Unexpected fio output: %r", must_be_empty)
 
-        res = {}  # type: JobMetrics
-
         # put fio output into storage
         fio_out = node.get_file_content(self.remote_output_file)
-        self.rstorage.put_extra(job_root, node.info.node_id(), "fio_raw", fio_out)
+
+        path = DataSource(suite_id=self.suite.storage_id,
+                          job_id=job.storage_id,
+                          node_id=node.node_id,
+                          dev='fio',
+                          sensor='stdout',
+                          tag='json')
+
+        self.storage.put_extra(fio_out, path)
         node.conn.fs.unlink(self.remote_output_file)
 
         files = [name for name in node.conn.fs.listdir(self.exec_folder)]
-
-        for name, path in get_log_files(f_iter_config):
-            log_files = [fname for fname in files if fname.startswith(path)]
+        result = []
+        for name, file_path in get_log_files(cast(FioJobConfig, job)):
+            log_files = [fname for fname in files if fname.startswith(file_path)]
             if len(log_files) != 1:
                 logger.error("Found %s files, match log pattern %s(%s) - %s",
-                             len(log_files), path, name, ",".join(log_files[10:]))
+                             len(log_files), file_path, name, ",".join(log_files[:10]))
                 raise StopTestError()
 
             fname = os.path.join(self.exec_folder, log_files[0])
@@ -203,14 +220,13 @@
                         logger.exception("Error during parse %s fio log file in line %s: %r", name, idx, line)
                         raise StopTestError()
 
-            ts = TimeSeries(name=name,
-                            raw=raw_result,
-                            second_axis_size=expected_lat_bins if name == 'lat' else 1,
-                            data=parsed,
-                            times=times)
-            res[(node.info.node_id(), 'fio', name)] = ts
-
-        return res
+            result.append(TimeSeries(name=name,
+                                     raw=raw_result,
+                                     second_axis_size=expected_lat_bins if name == 'lat' else 1,
+                                     data=parsed,
+                                     times=times,
+                                     source=path(sensor=name, tag=None)))
+        return result
 
     def format_for_console(self, data: Any) -> str:
         raise NotImplementedError()
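
Editor's note: run_iteration() now tags every stored blob with a DataSource
record instead of ad-hoc path strings, and the call path(sensor=name, tag=None)
implies DataSource instances are callable and yield a copy with some fields
replaced. A sketch of that pattern, assuming a NamedTuple-style record with
illustrative ids (the authoritative definition lives in
wally/result_classes.py, which this diff does not show):

    from typing import NamedTuple, Optional

    class DataSource(NamedTuple):
        suite_id: Optional[str] = None
        job_id: Optional[str] = None
        node_id: Optional[str] = None
        dev: Optional[str] = None
        sensor: Optional[str] = None
        tag: Optional[str] = None

        def __call__(self, **updates) -> 'DataSource':
            # return a copy with the given fields replaced
            return self._replace(**updates)

    path = DataSource(suite_id='fio_0', job_id='randwrited4_qd16_0',
                      node_id='127.0.0.1:22', dev='fio', sensor='stdout', tag='json')
    bw_source = path(sensor='bw', tag=None)
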
diff --git a/wally/suits/io/fio_task_parser.py b/wally/suits/io/fio_task_parser.py
index 6940aaf..03702ae 100644
--- a/wally/suits/io/fio_task_parser.py
+++ b/wally/suits/io/fio_task_parser.py
@@ -7,13 +7,12 @@
 import os.path
 import argparse
 import itertools
-from typing import Optional, Iterator, Union, Dict, Iterable, List, TypeVar, Callable, Tuple, NamedTuple, Any
+from typing import Optional, Iterator, Union, Dict, Iterable, List, TypeVar, Callable, Tuple, NamedTuple, Any, cast
 from collections import OrderedDict
 
 
-from ...result_classes import IStorable
 from ...result_classes import TestJobConfig
-from ...utils import sec_to_str, ssize2b
+from ...utils import sec_to_str, ssize2b, b2ssize, flatmap
 
 
 SECTION = 0
@@ -29,23 +28,162 @@
                       ('tp', int),
                       ('name', str),
                       ('val', Any)])
+FioTestSumm = NamedTuple("FioTestSumm",
+                         [("oper", str),
+                          ("sync_mode", str),
+                          ("bsize", int),
+                          ("qd", int),
+                          ("thcount", int),
+                          ("write_perc", Optional[int])])
 
-TestSumm = NamedTuple("TestSumm",
-                      [("oper", str),
-                       ("mode", str),
-                       ("bsize", int),
-                       ("iodepth", int),
-                       ("vm_count", int)])
+
+def is_fio_opt_true(vl: Union[str, int]) -> bool:
+    return str(vl).lower() in ['1', 'true', 't', 'yes', 'y']
 
 
 class FioJobConfig(TestJobConfig):
-    def __init__(self, name: str) -> None:
-        TestJobConfig.__init__(self)
-        self.vals = OrderedDict()  # type: Dict[str, Any]
-        self.name = name
 
-    def __eq__(self, other: 'FioJobConfig') -> bool:
-        return self.vals == other.vals
+    ds2mode = {(True, True): 'x',
+               (True, False): 's',
+               (False, True): 'd',
+               (False, False): 'a'}
+
+    sync2long = {'x': "sync direct",
+                 's': "sync",
+                 'd': "direct",
+                 'a': "buffered"}
+
+    op_type2short = {"randread": "rr",
+                     "randwrite": "rw",
+                     "read": "sr",
+                     "write": "sw",
+                     "randrw": "rx"}
+
+    def __init__(self, name: str, idx: int) -> None:
+        TestJobConfig.__init__(self, idx)
+        self.name = name
+        self._sync_mode = None  # type: Optional[str]
+        self._ctuple = None  # type: Optional[FioTestSumm]
+        self._ctuple_no_qd = None  # type: Optional[FioTestSumm]
+
+    # ------------- BASIC PROPERTIES -----------------------------------------------------------------------------------
+
+    @property
+    def write_perc(self) -> Optional[int]:
+        try:
+            return int(self.vals["rwmixwrite"])
+        except (KeyError, TypeError):
+            try:
+                return 100 - int(self.vals["rwmixread"])
+            except (KeyError, TypeError):
+                return None
+
+    @property
+    def qd(self) -> int:
+        return int(self.vals['iodepth'])
+
+    @property
+    def bsize(self) -> int:
+        return ssize2b(self.vals['blocksize']) // 1024
+
+    @property
+    def oper(self) -> str:
+        return self.vals['rw']
+
+    @property
+    def op_type_short(self) -> str:
+        return self.op_type2short[self.vals['rw']]
+
+    @property
+    def thcount(self) -> int:
+        return int(self.vals.get('numjobs', 1))
+
+    @property
+    def sync_mode(self) -> str:
+        if self._sync_mode is None:
+            direct = is_fio_opt_true(self.vals.get('direct', '0')) or \
+                     not is_fio_opt_true(self.vals.get('buffered', '0'))
+            sync = is_fio_opt_true(self.vals.get('sync', '0'))
+            self._sync_mode = self.ds2mode[(sync, direct)]
+        return cast(str, self._sync_mode)
+
+    @property
+    def sync_mode_long(self) -> str:
+        return self.sync2long[self.sync_mode]
+
+    # ----------- COMPLEX PROPERTIES -----------------------------------------------------------------------------------
+
+    @property
+    def characterized_tuple(self) -> Tuple:
+        if self._ctuple is None:
+            self._ctuple = FioTestSumm(oper=self.oper,
+                                       sync_mode=self.sync_mode,
+                                       bsize=self.bsize,
+                                       qd=self.qd,
+                                       thcount=self.thcount,
+                                       write_perc=self.write_perc)
+
+        return cast(Tuple, self._ctuple)
+
+    @property
+    def characterized_tuple_no_qd(self) -> FioTestSumm:
+        if self._ctuple_no_qd is None:
+            self._ctuple_no_qd = FioTestSumm(oper=self.oper,
+                                             sync_mode=self.sync_mode,
+                                             bsize=self.bsize,
+                                             qd=None,
+                                             thcount=self.thcount,
+                                             write_perc=self.write_perc)
+
+        return cast(FioTestSumm, self._ctuple_no_qd)
+
+    @property
+    def long_summary(self) -> str:
+        res = "{0.sync_mode_long} {0.oper} {1} QD={0.qd}".format(self, b2ssize(self.bsize * 1024))
+        if self.thcount != 1:
+            res += " threads={}".format(self.thcount)
+        if self.write_perc is not None:
+            res += " write_perc={}%".format(self.write_perc)
+        return res
+
+    @property
+    def long_summary_no_qd(self) -> str:
+        res = "{0.sync_mode_long} {0.oper} {1}".format(self, b2ssize(self.bsize * 1024))
+        if self.thcount != 1:
+            res += " threads={}".format(self.thcount)
+        if self.write_perc is not None:
+            res += " write_perc={}%".format(self.write_perc)
+        return res
+
+    @property
+    def summary(self) -> str:
+        tpl = cast(FioTestSumm, self.characterized_tuple)
+        res = "{0.oper}{0.sync_mode}{0.bsize}_qd{0.qd}".format(tpl)
+
+        if tpl.thcount != 1:
+            res += "th" + str(tpl.thcount)
+        if tpl.write_perc is not None:
+            res += "wr" + str(tpl.write_perc)
+
+        return res
+
+    @property
+    def summary_no_qd(self) -> str:
+        tpl = cast(FioTestSumm, self.characterized_tuple)
+        res = "{0.oper}{0.sync_mode}{0.bsize}".format(tpl)
+
+        if tpl.thcount != 1:
+            res += "th" + str(tpl.thcount)
+        if tpl.write_perc is not None:
+            res += "wr" + str(tpl.write_perc)
+
+        return res
+    # ------------------------------------------------------------------------------------------------------------------
+
+    def __eq__(self, o: object) -> bool:
+        if not isinstance(o, FioJobConfig):
+            return False
+        return self.vals == cast(FioJobConfig, o).vals
 
     def copy(self) -> 'FioJobConfig':
         return copy.deepcopy(self)
@@ -75,17 +213,17 @@
         return str(self)
 
     def raw(self) -> Dict[str, Any]:
-        return {
-            'vals': [[key, val] for key, val in self.vals.items()],
-            'summary': self.summary,
-            'name': self.name
-        }
+        res = self.__dict__.copy()
+        # don't serialize cached derived values - they are recomputed on demand
+        del res['_sync_mode']
+        del res['_ctuple']
+        del res['_ctuple_no_qd']
+        res['vals'] = [[key, val] for key, val in self.vals.items()]
+        return res
 
     @classmethod
     def fromraw(cls, data: Dict[str, Any]) -> 'FioJobConfig':
-        obj = cls(data['name'])
-        obj.summary = data['summary']
-        obj.vals.update(data['vals'])
+        obj = cls.__new__(cls)
+        data['vals'] = OrderedDict(data['vals'])
+        data['_sync_mode'] = None
+        data['_ctuple'] = None
+        data['_ctuple_no_qd'] = None
+        obj.__dict__.update(data)
         return obj
 
 
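Editor's note: raw()/fromraw() now serialize the whole instance __dict__ (minus
cached fields), so attributes added to TestJobConfig round-trip without extra
code. A round-trip sketch, assuming TestJobConfig initializes .vals as an
OrderedDict (its definition is not shown in this diff):

    cfg = FioJobConfig('test_{TEST_SUMM}', idx=3)
    cfg.vals.update({'rw': 'randread', 'blocksize': '4k', 'iodepth': 1})

    data = cfg.raw()                    # plain dict; vals stored as [key, val] pairs
    restored = FioJobConfig.fromraw(data)
    assert restored == cfg              # __eq__ compares .vals only
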
@@ -203,6 +341,8 @@
 
         lexed_lines = new_lines
 
+    suite_section_idx = 0
+
     for fname, lineno, oline, tp, name, val in lexed_lines:
         if tp == SECTION:
             if curr_section is not None:
@@ -215,7 +355,8 @@
                 in_globals = True
             else:
                 in_globals = False
-                curr_section = FioJobConfig(name)
+                curr_section = FioJobConfig(name, idx=suite_section_idx)
+                suite_section_idx += 1
                 curr_section.vals = glob_vals.copy()
             sections_count += 1
         else:
@@ -332,68 +473,13 @@
     params = sec.vals.copy()
     params['UNIQ'] = 'UN{0}'.format(counter[0])
     params['COUNTER'] = str(counter[0])
-    params['TEST_SUMM'] = get_test_summary(sec)
+    params['TEST_SUMM'] = sec.summary
     sec.name = sec.name.format(**params)
     counter[0] += 1
 
     return sec
 
 
-def get_test_sync_mode(sec: FioJobConfig) -> str:
-    if isinstance(sec, dict):
-        vals = sec
-    else:
-        vals = sec.vals
-
-    is_sync = str(vals.get("sync", "0")) == "1"
-    is_direct = str(vals.get("direct", "0")) == "1"
-
-    if is_sync and is_direct:
-        return 'x'
-    elif is_sync:
-        return 's'
-    elif is_direct:
-        return 'd'
-    else:
-        return 'a'
-
-
-def get_test_summary_tuple(sec: FioJobConfig, vm_count: int = None) -> TestSumm:
-    if isinstance(sec, dict):
-        vals = sec
-    else:
-        vals = sec.vals
-
-    rw = {"randread": "rr",
-          "randwrite": "rw",
-          "read": "sr",
-          "write": "sw",
-          "randrw": "rm",
-          "rw": "sm",
-          "readwrite": "sm"}[vals["rw"]]
-
-    sync_mode = get_test_sync_mode(sec)
-
-    return TestSumm(rw,
-                    sync_mode,
-                    vals['blocksize'],
-                    vals.get('iodepth', '1'),
-                    vm_count)
-
-
-def get_test_summary(sec: FioJobConfig, vm_count: int = None, noiodepth: bool = False) -> str:
-    tpl = get_test_summary_tuple(sec, vm_count)
-
-    res = "{0.oper}{0.mode}{0.bsize}".format(tpl)
-    if not noiodepth:
-        res += "qd{}".format(tpl.iodepth)
-
-    if tpl.vm_count is not None:
-        res += "vm{}".format(tpl.vm_count)
-
-    return res
-
-
 def execution_time(sec: FioJobConfig) -> int:
     return sec.vals.get('ramp_time', 0) + sec.vals.get('runtime', 0)
 
@@ -402,23 +488,18 @@
     return fio_config_parse(fio_config_lexer(source, fname))
 
 
-FM_FUNC_INPUT = TypeVar("FM_FUNC_INPUT")
-FM_FUNC_RES = TypeVar("FM_FUNC_RES")
-
-
-def flatmap(func: Callable[[FM_FUNC_INPUT], Iterable[FM_FUNC_RES]],
-            inp_iter: Iterable[FM_FUNC_INPUT]) -> Iterator[FM_FUNC_RES]:
-    for val in inp_iter:
-        for res in func(val):
-            yield res
-
-
-def get_log_files(sec: FioJobConfig) -> List[Tuple[str, str]]:
+def get_log_files(sec: FioJobConfig, iops: bool = False) -> List[Tuple[str, str]]:
     res = []  # type: List[Tuple[str, str]]
-    for key, name in (('write_iops_log', 'iops'), ('write_bw_log', 'bw'), ('write_hist_log', 'lat')):
+
+    keys = [('write_bw_log', 'bw'), ('write_hist_log', 'lat')]
+    if iops:
+        keys.append(('write_iops_log', 'iops'))
+
+    for key, name in keys:
         log = sec.vals.get(key)
         if log is not None:
             res.append((name, log))
+
     return res
 
 
@@ -427,7 +508,6 @@
     it = (apply_params(sec, test_params) for sec in it)
     it = flatmap(process_cycles, it)
     for sec in map(final_process, it):
-        sec.summary = get_test_summary(sec)
         yield sec
 
 
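Editor's note: a worked example of the new summary properties, assuming
TestJobConfig provides the .vals mapping (exact b2ssize() output may differ):

    cfg = FioJobConfig('test', idx=0)
    cfg.vals.update({'rw': 'randwrite', 'blocksize': '4k', 'iodepth': 16,
                     'direct': '1', 'numjobs': 1})

    cfg.sync_mode     # 'd'   - direct=1, sync=0
    cfg.bsize         # 4     - block size in KiB
    cfg.summary       # 'randwrited4_qd16'; write_perc is unset, so no 'wr' suffix
    cfg.long_summary  # roughly 'direct randwrite 4KiB QD=16'

Note that summary embeds the full rw mode name where the removed
get_test_summary() used two-letter codes (op_type2short exists but is not used
in the summary string).
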
diff --git a/wally/suits/io/one_step.cfg b/wally/suits/io/one_step.cfg
new file mode 100644
index 0000000..3e08c8b
--- /dev/null
+++ b/wally/suits/io/one_step.cfg
@@ -0,0 +1,9 @@
+[global]
+include defaults_qd.cfg
+ramp_time=0
+runtime={RUNTIME}
+
+[test_{TEST_SUMM}]
+blocksize=60k
+rw=randread
+iodepth=1
\ No newline at end of file
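
Editor's note: after parameter substitution ({RUNTIME} from the test params,
{TEST_SUMM} filled by apply_params() from the job summary) the single job
section above expands to roughly the following, assuming defaults_qd.cfg sets
direct=1:

    [test_randreadd60_qd1]
    blocksize=60k
    rw=randread
    iodepth=1
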
diff --git a/wally/suits/io/rpc_plugin.py b/wally/suits/io/rpc_plugin.py
index 98e55f0..5f5cfb5 100644
--- a/wally/suits/io/rpc_plugin.py
+++ b/wally/suits/io/rpc_plugin.py
@@ -13,6 +13,7 @@
 logger = logging.getLogger("agent.fio")
 
 
+# TODO: fix this for the case when the file is a block device
 def check_file_prefilled(path, used_size_mb):
     used_size = used_size_mb * 1024 ** 2
     blocks_to_check = 16
@@ -20,9 +21,9 @@
     try:
         fstats = os.stat(path)
         if stat.S_ISREG(fstats.st_mode) and fstats.st_size < used_size:
-            return True
+            return False
     except EnvironmentError:
-        return True
+        return False
 
     offsets = [random.randrange(used_size - 1024) for _ in range(blocks_to_check)]
     offsets.append(used_size - 1024)
@@ -32,15 +33,15 @@
         for offset in offsets:
             fd.seek(offset)
             if b"\x00" * 1024 == fd.read(1024):
-                return True
+                return False
 
-    return False
+    return True
 
 
 def rpc_fill_file(fname, size, force=False, fio_path='fio'):
     if not force:
-        if not check_file_prefilled(fname, size):
-            return
+        if check_file_prefilled(fname, size):
+            return False, None
 
     assert size % 4 == 0, "File size must be a multiple of 4 MiB"
 
@@ -50,7 +51,9 @@
     subprocess.check_output(cmd_templ.format(fio_path, fname, size), shell=True)
     run_time = time.time() - run_time
 
-    return None if run_time < 1.0 else int(size / run_time)
+    prefill_bw = None if run_time < 1.0 else int(size / run_time)
+
+    return True, prefill_bw
 
 
 def rpc_install(name, binary):
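
Editor's note: the fill helper's contract changed from "bandwidth or None" to a
(did_fill, bandwidth) pair, so the caller can tell "already prefilled" apart
from "filled too quickly to measure". A usage sketch (path and size are
illustrative):

    did_fill, fill_bw = rpc_fill_file('/opt/wally/test.bin', 4096, force=False)
    if not did_fill:
        print('already prefilled, nothing written')
    elif fill_bw is None:
        print('filled in under a second, bandwidth not measurable')
    else:
        print('filled at about %s MiBps' % fill_bw)
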
diff --git a/wally/suits/io/rrd.cfg b/wally/suits/io/rrd.cfg
index ca3c613..ae2d960 100644
--- a/wally/suits/io/rrd.cfg
+++ b/wally/suits/io/rrd.cfg
@@ -1,7 +1,7 @@
 [global]
 include defaults_qd.cfg
 ramp_time=0
-runtime=10
+runtime={RUNTIME}
 
 [test_{TEST_SUMM}]
 blocksize=60k
@@ -12,3 +12,23 @@
 iodepth=16
 blocksize=60k
 rw=randread
+
+[test_{TEST_SUMM}]
+blocksize=60k
+rw=randwrite
+iodepth=1
+
+[test_{TEST_SUMM}]
+iodepth=16
+blocksize=60k
+rw=randwrite
+
+[test_{TEST_SUMM}]
+iodepth=1
+blocksize=1m
+rw=write
+
+[test_{TEST_SUMM}]
+iodepth=1
+blocksize=1m
+rw=read
diff --git a/wally/suits/io/rrd_qd_scan.cfg b/wally/suits/io/rrd_qd_scan.cfg
new file mode 100644
index 0000000..e0937c9
--- /dev/null
+++ b/wally/suits/io/rrd_qd_scan.cfg
@@ -0,0 +1,9 @@
+[global]
+include defaults_qd.cfg
+ramp_time=0
+runtime={RUNTIME}
+
+[test_{TEST_SUMM}]
+blocksize=4k
+rw=randread
+iodepth={QDS}
diff --git a/wally/suits/io/rrd_raw.cfg b/wally/suits/io/rrd_raw.cfg
new file mode 100644
index 0000000..2b0fc74
--- /dev/null
+++ b/wally/suits/io/rrd_raw.cfg
@@ -0,0 +1,21 @@
+[test]
+blocksize=4k
+rw=randread
+iodepth=1
+ramp_time=0
+runtime=120
+buffered=0
+direct=1
+sync=0
+ioengine=libaio
+group_reporting=1
+unified_rw_reporting=1
+norandommap=1
+numjobs=1
+thread=1
+time_based=1
+wait_for_previous=1
+per_job_logs=0
+randrepeat=0
+filename=/dev/sdb
+size=100G
\ No newline at end of file
diff --git a/wally/suits/itest.py b/wally/suits/itest.py
index bc6b115..ac9e1c1 100644
--- a/wally/suits/itest.py
+++ b/wally/suits/itest.py
@@ -1,19 +1,14 @@
-import re
 import abc
 import time
-import array
-import struct
 import logging
 import os.path
-import datetime
-from typing import Any, List, Optional, Callable, cast, Iterator, Tuple, Iterable
+from typing import Any, List, Optional, Callable, Tuple, Iterable, cast
 
 from concurrent.futures import ThreadPoolExecutor, wait, Future
 
-from ..utils import StopTestError, sec_to_str, get_time_interval_printable_info
+from ..utils import StopTestError, get_time_interval_printable_info
 from ..node_interfaces import IRPCNode
-from ..storage import Storage
-from ..result_classes import TestSuiteConfig, TestJobConfig, JobMetrics, TimeSeries
+from ..result_classes import TestSuiteConfig, TestJobConfig, TimeSeries, IResultStorage
 
 
 logger = logging.getLogger("wally")
@@ -22,127 +17,6 @@
 __doc__ = "Contains base classes for performance tests"
 
 
-class ResultStorage:
-    ts_header_format = "!IIIcc"
-
-    def __init__(self, storage: Storage, job_config_cls: type) -> None:
-        self.storage = storage
-        self.job_config_cls = job_config_cls
-
-    def get_suite_root(self, suite_type: str, idx: int) -> str:
-        return "results/{}_{}".format(suite_type, idx)
-
-    def get_job_root(self, suite_root: str, summary: str, run_id: int) -> str:
-        return "{}/{}_{}".format(suite_root, summary, run_id)
-
-    # store
-    def put_suite_config(self, config: TestSuiteConfig, root: str) -> None:
-        self.storage.put(config, root, "config.yml")
-
-    def put_job_config(self, config: TestJobConfig, root: str) -> None:
-        self.storage.put(config, root, "config.yml")
-
-    def get_suite_config(self, suite_root: str) -> TestSuiteConfig:
-        return self.storage.load(TestSuiteConfig, suite_root, "config.yml")
-
-    def get_job_node_prefix(self, job_root_path: str, node_id: str) -> str:
-        return "{}/{}".format(job_root_path, node_id)
-
-    def get_ts_path(self, job_root_path: str, node_id: str, dev: str, sensor_name: str) -> str:
-        return "{}_{}.{}".format(self.get_job_node_prefix(job_root_path, node_id), dev, sensor_name)
-
-    def put_ts(self, ts: TimeSeries, job_root_path: str, node_id: str, dev: str, sensor_name: str) -> None:
-        # TODO: check that 'metrics', 'dev' and 'node_id' match required patterns
-        root_path = self.get_ts_path(job_root_path, node_id, dev, sensor_name)
-
-        if len(ts.data) / ts.second_axis_size != len(ts.times):
-            logger.error("Unbalanced time series data. Array size has % elements, while time size has %",
-                         len(ts.data) / ts.second_axis_size, len(ts.times))
-            raise StopTestError()
-
-        with self.storage.get_fd(root_path, "cb") as fd:
-            header = struct.pack(self.ts_header_format,
-                                 ts.second_axis_size,
-                                 len(ts.data),
-                                 len(ts.times),
-                                 cast(array.array, ts.data).typecode.encode("ascii"),
-                                 cast(array.array, ts.times).typecode.encode("ascii"))
-            fd.write(header)
-            cast(array.array, ts.data).tofile(fd)
-            cast(array.array, ts.times).tofile(fd)
-
-        if ts.raw is not None:
-            self.storage.put_raw(ts.raw, root_path + ":raw")
-
-    def put_extra(self, job_root: str, node_id: str, key: str, data: bytes) -> None:
-        self.storage.put_raw(data, job_root, node_id + "_" + key)
-
-    def list_suites(self) -> Iterator[Tuple[TestSuiteConfig, str]]:
-        """iterates over (suite_name, suite_id, suite_root_path)
-        primary this function output should be used as input into list_jobs_in_suite method
-        """
-        ts_re = re.compile(r"[a-zA-Z]+_\d+$")
-        for is_file, name in self.storage.list("results"):
-            if not is_file:
-                rr = ts_re.match(name)
-                if rr:
-                    path = "results/" + name
-                    yield self.get_suite_config(path), path
-
-    def list_jobs_in_suite(self, suite_root_path: str) -> Iterator[Tuple[TestJobConfig, str, int]]:
-        """iterates over (job_summary, job_root_path)
-        primary this function output should be used as input into list_ts_in_job method
-        """
-        ts_re = re.compile(r"(?P<job_summary>[a-zA-Z0-9]+)_(?P<id>\d+)$")
-        for is_file, name in self.storage.list(suite_root_path):
-            if is_file:
-                continue
-            rr = ts_re.match(name)
-            if rr:
-                config_path = "{}/{}/config.yml".format(suite_root_path, name)
-                if config_path in self.storage:
-                    cfg = self.storage.load(self.job_config_cls, config_path)
-                    yield cfg, "{}/{}".format(suite_root_path, name), int(rr.group("id"))
-
-    def list_ts_in_job(self, job_root_path: str) -> Iterator[Tuple[str, str, str]]:
-        """iterates over (node_id, device_name, sensor_name)
-        primary this function output should be used as input into load_ts method
-        """
-        # TODO: check that all TS files available
-        ts_re = re.compile(r"(?P<node_id>\d+\.\d+\.\d+\.\d+:\d+)_(?P<dev>[^.]+)\.(?P<sensor>[a-z_]+)$")
-        already_found = set()
-        for is_file, name in self.storage.list(job_root_path):
-            if not is_file:
-                continue
-            rr = ts_re.match(name)
-            if rr:
-                key = (rr.group("node_id"), rr.group("dev"), rr.group("sensor"))
-                if key not in already_found:
-                    already_found.add(key)
-                    yield key
-
-    def load_ts(self, root_path: str, node_id: str, dev: str, sensor_name: str) -> TimeSeries:
-        path = self.get_ts_path(root_path, node_id, dev, sensor_name)
-
-        with self.storage.get_fd(path, "rb") as fd:
-            header = fd.read(struct.calcsize(self.ts_header_format))
-            second_axis_size, data_sz, time_sz, data_typecode, time_typecode = \
-                struct.unpack(self.ts_header_format, header)
-
-            data = array.array(data_typecode.decode("ascii"))
-            times = array.array(time_typecode.decode("ascii"))
-
-            data.fromfile(fd, data_sz)
-            times.fromfile(fd, time_sz)
-
-            # calculate number of elements
-            return TimeSeries("{}.{}".format(dev, sensor_name),
-                              raw=None,
-                              data=data,
-                              times=times,
-                              second_axis_size=second_axis_size)
-
-
 class PerfTest(metaclass=abc.ABCMeta):
     """Base class for all tests"""
     name = None  # type: str
@@ -150,20 +24,18 @@
     retry_time = 30
     job_config_cls = None  # type: type
 
-    def __init__(self, storage: Storage, config: TestSuiteConfig, idx: int, on_idle: Callable[[], None] = None) -> None:
-        self.config = config
+    def __init__(self, storage: IResultStorage, suite: TestSuiteConfig, on_idle: Callable[[], None] = None) -> None:
+        self.suite = suite
         self.stop_requested = False
-        self.sorted_nodes_ids = sorted(node.info.node_id() for node in self.config.nodes)
+        self.sorted_nodes_ids = sorted(node.node_id for node in self.suite.nodes)
         self.on_idle = on_idle
         self.storage = storage
-        self.rstorage = ResultStorage(self.storage, self.job_config_cls)
-        self.idx = idx
 
     def request_stop(self) -> None:
         self.stop_requested = True
 
     def join_remote(self, path: str) -> str:
-        return os.path.join(self.config.remote_dir, path)
+        return os.path.join(self.suite.remote_dir, path)
 
     @abc.abstractmethod
     def run(self) -> None:
@@ -185,62 +57,51 @@
 
     def __init__(self, *args, **kwargs) -> None:
         PerfTest.__init__(self, *args, **kwargs)
-        self.job_configs = [None]  # type: List[Optional[TestJobConfig]]
-        self.suite_root_path = self.rstorage.get_suite_root(self.config.test_type, self.idx)
+        self.job_configs = None  # type: Optional[List[TestJobConfig]]
 
     @abc.abstractmethod
     def get_expected_runtime(self, iter_cfg: TestJobConfig) -> Optional[int]:
         pass
 
-    def get_not_done_stages(self) -> Iterable[Tuple[int, TestJobConfig]]:
-        all_jobs = dict(enumerate(self.job_configs))
-        for db_config, path, jid in self.rstorage.list_jobs_in_suite(self.suite_root_path):
-            if jid in all_jobs:
-                job_config = all_jobs[jid]
-                if job_config != db_config:
-                    logger.error("Test info at path '%s/config' is not equal to expected config for iteration %s.%s." +
+    def get_not_done_jobs(self) -> Iterable[TestJobConfig]:
+        jobs_map = {job.storage_id: job for job in self.job_configs}
+        already_in_storage = set()
+        for db_config in cast(List[TestJobConfig], self.storage.iter_job(self.suite)):
+            if db_config.storage_id in jobs_map:
+                job = jobs_map[db_config.storage_id]
+                if job != db_config:
+                    logger.error("Test info at '%s.%s' is not equal to expected config for iteration %s.%s." +
                                  " Maybe configuration was changed before test was restarted. " +
                                  "DB cfg is:\n    %s\nExpected cfg is:\n    %s\nFix DB or rerun test from beginning",
-                                 path, self.name, job_config.summary,
+                                 self.suite.storage_id, job.storage_id, self.name, job.summary,
                                  str(db_config).replace("\n", "\n    "),
-                                 str(job_config).replace("\n", "\n    "))
+                                 str(job).replace("\n", "\n    "))
                     raise StopTestError()
 
-                logger.info("Test iteration %s.%s found in storage and will be skipped",
-                            self.name, job_config.summary)
-                del all_jobs[jid]
-        return all_jobs.items()
+                logger.info("Test iteration %s.%s found in storage and will be skipped", self.name, job.summary)
+                already_in_storage.add(db_config.storage_id)
+
+        return [job for job in self.job_configs if job.storage_id not in already_in_storage]
 
     def run(self) -> None:
-        try:
-            cfg = self.rstorage.get_suite_config(self.suite_root_path)
-        except KeyError:
-            cfg = None
+        self.storage.put_or_check_suite(self.suite)
 
-        if cfg is not None and cfg != self.config:
-            logger.error("Current suite %s config is not equal to found in storage at %s",
-                         self.config.test_type, self.suite_root_path)
-            raise StopTestError()
-
-        not_in_storage = list(self.get_not_done_stages())
-
+        not_in_storage = list(self.get_not_done_jobs())
         if not not_in_storage:
             logger.info("All test iteration in storage already. Skip test")
             return
 
-        self.rstorage.put_suite_config(self.config, self.suite_root_path)
-
         logger.debug("Run test %s with profile %r on nodes %s.", self.name,
                                                                  self.load_profile_name,
                                                                  ",".join(self.sorted_nodes_ids))
         logger.debug("Prepare nodes")
 
 
-        with ThreadPoolExecutor(len(self.config.nodes)) as pool:
+        with ThreadPoolExecutor(len(self.suite.nodes)) as pool:
             # config nodes
-            list(pool.map(self.config_node, self.config.nodes))
+            list(pool.map(self.config_node, self.suite.nodes))
 
-            run_times = [self.get_expected_runtime(job_config) for _, job_config in not_in_storage]
+            run_times = list(map(self.get_expected_runtime, not_in_storage))
 
             if None not in run_times:
                 # +5% - is a rough estimation for additional operations
@@ -249,51 +110,52 @@
                 exec_time_s, end_dt_s = get_time_interval_printable_info(expected_run_time)
                 logger.info("Entire test should takes around %s and finished at %s", exec_time_s, end_dt_s)
 
-            for run_id, job_config in not_in_storage:
-                job_path = self.rstorage.get_job_root(self.suite_root_path, job_config.summary, run_id)
-
-                jfutures = []  # type: List[Future]
-                for idx in range(self.max_retry):
-                    logger.debug("Prepare job %s", job_config.summary)
+            for job in not_in_storage:
+                results = []  # type: List[TimeSeries]
+                for retry_idx in range(self.max_retry):
+                    logger.debug("Prepare job %s", job.summary)
 
                     # prepare nodes for new iterations
-                    wait([pool.submit(self.prepare_iteration, node, job_config) for node in self.config.nodes])
+                    wait([pool.submit(self.prepare_iteration, node, job) for node in self.suite.nodes])
 
-                    expected_job_time = self.get_expected_runtime(job_config)
+                    expected_job_time = self.get_expected_runtime(job)
                     exec_time_s, end_dt_s = get_time_interval_printable_info(expected_job_time)
                     logger.info("Job should takes around %s and finished at %s", exec_time_s, end_dt_s)
 
-                    try:
-                        jfutures = []
-                        for node in self.config.nodes:
-                            future = pool.submit(self.run_iteration, node, job_config, job_path)
-                            jfutures.append(future)
-                        # test completed successfully, stop retrying
-                        break
-                    except EnvironmentError:
-                        if self.max_retry - 1 == idx:
-                            logger.exception("Fio failed")
-                            raise StopTestError()
-                        logger.exception("During fio run")
-                        logger.info("Sleeping %ss and retrying job", self.retry_time)
-                        time.sleep(self.retry_time)
+                    jfutures = [pool.submit(self.run_iteration, node, job) for node in self.suite.nodes]
+                    failed = False
+                    for future in jfutures:
+                        try:
+                            results.extend(future.result())
+                        except EnvironmentError:
+                            # log here, while the exception context is still available
+                            logger.exception("During fio run")
+                            failed = True
 
+                    if not failed:
+                        break
+
+                    if self.max_retry - 1 == retry_idx:
+                        logger.error("Fio failed")
+                        raise StopTestError()
+
+                    logger.info("Sleeping %ss and retrying job", self.retry_time)
+                    time.sleep(self.retry_time)
+                    results = []
+
+                # per-node job start and stop times
                 start_times = []  # type: List[int]
                 stop_times = []  # type: List[int]
 
-                for future in jfutures:
-                    for (node_id, dev, sensor_name), ts in future.result().items():
-                        self.rstorage.put_ts(ts, job_path, node_id=node_id, dev=dev, sensor_name=sensor_name)
-
-                        if len(ts.times) >= 2:
-                            start_times.append(ts.times[0])
-                            stop_times.append(ts.times[-1])
+                for ts in results:
+                    self.storage.put_ts(ts)
+                    if len(ts.times) >= 2:  # type: ignore
+                        start_times.append(ts.times[0])
+                        stop_times.append(ts.times[-1])
 
                 if len(start_times) > 0:
                     min_start_time = min(start_times)
                     max_start_time = max(start_times)
                     min_stop_time = min(stop_times)
-                    max_stop_time = max(stop_times)
 
                     max_allowed_time_diff = int((min_stop_time - max_start_time) * self.max_rel_time_diff)
                     max_allowed_time_diff = max(max_allowed_time_diff, self.max_time_diff)
@@ -301,16 +163,19 @@
                     if min_start_time + self.max_time_diff < max_allowed_time_diff:
                         logger.warning("Too large difference in %s:%s start time - %s. " +
                                        "Max recommended difference is %s",
-                                       self.name, job_config.summary,
+                                       self.name, job.summary,
                                        max_start_time - min_start_time, self.max_time_diff)
 
                     if min_stop_time + self.max_time_diff < max_allowed_time_diff:
                         logger.warning("Too large difference in %s:%s stop time - %s. " +
                                        "Max recommended difference is %s",
-                                       self.name, job_config.summary,
+                                       self.name, job.summary,
                                        max_start_time - min_start_time, self.max_time_diff)
 
-                self.rstorage.put_job_config(job_config, job_path)
+                    job.reliable_info_starts_at = max_start_time
+                    job.reliable_info_stops_at = min_stop_time
+
+                self.storage.put_job(self.suite, job)
                 self.storage.sync()
 
                 if self.on_idle is not None:
@@ -321,24 +186,25 @@
         pass
 
     @abc.abstractmethod
-    def prepare_iteration(self, node: IRPCNode, iter_config: TestJobConfig) -> None:
+    def prepare_iteration(self, node: IRPCNode, job: TestJobConfig) -> None:
         pass
 
     @abc.abstractmethod
-    def run_iteration(self, node: IRPCNode, iter_config: TestJobConfig, stor_prefix: str) -> JobMetrics:
+    def run_iteration(self, node: IRPCNode, job: TestJobConfig) -> List[TimeSeries]:
         pass
 
 
 class TwoScriptTest(ThreadedTest, metaclass=abc.ABCMeta):
     def __init__(self, *dt, **mp) -> None:
         ThreadedTest.__init__(self, *dt, **mp)
-        self.prerun_script = self.config.params['prerun_script']
-        self.run_script = self.config.params['run_script']
-        self.prerun_tout = self.config.params.get('prerun_tout', 3600)
-        self.run_tout = self.config.params.get('run_tout', 3600)
-        self.iterations_configs = [None]
+        self.prerun_script = self.suite.params['prerun_script']
+        self.run_script = self.suite.params['run_script']
+        self.prerun_tout = self.suite.params.get('prerun_tout', 3600)
+        self.run_tout = self.suite.params.get('run_tout', 3600)
+        # TODO: fix job_configs field
+        raise NotImplementedError("Fix job configs")
 
-    def get_expected_runtime(self, iter_cfg: TestJobConfig) -> Optional[int]:
+    def get_expected_runtime(self, job: TestJobConfig) -> Optional[int]:
         return None
 
     def config_node(self, node: IRPCNode) -> None:
@@ -346,19 +212,19 @@
         node.copy_file(self.prerun_script, self.join_remote(self.prerun_script))
 
         cmd = self.join_remote(self.prerun_script)
-        cmd += ' ' + self.config.params.get('prerun_opts', '')
+        cmd += ' ' + self.suite.params.get('prerun_opts', '')
         node.run(cmd, timeout=self.prerun_tout)
 
-    def prepare_iteration(self, node: IRPCNode, iter_config: TestJobConfig) -> None:
+    def prepare_iteration(self, node: IRPCNode, job: TestJobConfig) -> None:
         pass
 
-    def run_iteration(self, node: IRPCNode, iter_config: TestJobConfig, stor_prefix: str) -> JobMetrics:
+    def run_iteration(self, node: IRPCNode, job: TestJobConfig) -> List[TimeSeries]:
         # TODO: have to store logs
         cmd = self.join_remote(self.run_script)
-        cmd += ' ' + self.config.params.get('run_opts', '')
+        cmd += ' ' + self.suite.params.get('run_opts', '')
         return self.parse_results(node.run(cmd, timeout=self.run_tout))
 
     @abc.abstractmethod
-    def parse_results(self, data: str) -> JobMetrics:
+    def parse_results(self, data: str) -> List[TimeSeries]:
         pass
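
Editor's note: this commit replaces the in-module ResultStorage with an
IResultStorage dependency. From the calls made above, the interface needs at
least the surface below (a reconstruction for orientation only; the
authoritative definition is in wally/result_classes.py, which this commit does
not show):

    import abc
    from typing import Iterator

    class IResultStorage(metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def put_or_check_suite(self, suite: 'TestSuiteConfig') -> None: ...

        @abc.abstractmethod
        def put_job(self, suite: 'TestSuiteConfig', job: 'TestJobConfig') -> None: ...

        @abc.abstractmethod
        def iter_job(self, suite: 'TestSuiteConfig') -> Iterator['TestJobConfig']: ...

        @abc.abstractmethod
        def put_ts(self, ts: 'TimeSeries') -> None: ...

        @abc.abstractmethod
        def put_extra(self, data: bytes, source: 'DataSource') -> None: ...

        @abc.abstractmethod
        def sync(self) -> None: ...
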