many updates to report code and storage structure; this commit is broken
diff --git a/wally/suits/io/fio.py b/wally/suits/io/fio.py
index bf2e6b3..77d7a75 100644
--- a/wally/suits/io/fio.py
+++ b/wally/suits/io/fio.py
@@ -1,10 +1,10 @@
-import array
 import os.path
 import logging
-from typing import cast, Any, Tuple, List
+from typing import cast, Any, List, Union
+
+import numpy
 
 import wally
-
 from ...utils import StopTestError, ssize2b, b2ssize
 from ...node_interfaces import IRPCNode
 from ...node_utils import get_os
@@ -36,7 +36,7 @@
         self.use_system_fio = get('use_system_fio', False)  # type: bool
         self.use_sudo = get("use_sudo", True)  # type: bool
         self.force_prefill = get('force_prefill', False)  # type: bool
-
+        self.skip_prefill = get('skip_prefill', False)  # type: bool
         self.load_profile_name = self.suite.params['load']  # type: str
 
         if os.path.isfile(self.load_profile_name):
@@ -71,6 +71,11 @@
 
             self.file_size = list(sizes)[0]
             logger.info("Detected test file size is %sB", b2ssize(self.file_size))
+            tail = self.file_size % (4 * 1024 ** 2)
+            if tail != 0:
+                logger.warning("File size is not a multiple of 4MiB; the trailing %s will not be used for the test",
+                               str(tail // 1024) + "KiB" if tail >= 1024 else str(tail) + "B")
+                self.file_size -= tail
             self.load_params['FILESIZE'] = self.file_size
         else:
             self.file_size = ssize2b(self.load_params['FILESIZE'])
@@ -107,16 +112,18 @@
 
         self.install_utils(node)
 
-        mb = int(self.file_size / 1024 ** 2)
-        logger.info("Filling test file %s on node %s with %sMiB of random data", self.file_name, node.info, mb)
-        is_prefilled, fill_bw = node.conn.fio.fill_file(self.file_name, mb,
-                                                        force=self.force_prefill,
-                                                        fio_path=self.fio_path)
-
-        if not is_prefilled:
-            logger.info("Test file on node %s is already prefilled", node.info)
-        elif fill_bw is not None:
-            logger.info("Initial fio fill bw is %s MiBps for %s", fill_bw, node.info)
+        if self.skip_prefill:
+            logger.info("Prefill is skipped due to 'skip_prefill' set to true")
+        else:
+            mb = int(self.file_size / 1024 ** 2)
+            logger.info("Filling test file %s on node %s with %sMiB of random data", self.file_name, node.info, mb)
+            is_prefilled, fill_bw = node.conn.fio.fill_file(self.file_name, mb,
+                                                            force=self.force_prefill,
+                                                            fio_path=self.fio_path)
+            if not is_prefilled:
+                logger.info("Test file on node %s is already prefilled", node.info)
+            elif fill_bw is not None:
+                logger.info("Initial fio fill bw is %s MiBps for %s", fill_bw, node.info)
 
     def install_utils(self, node: IRPCNode) -> None:
         os_info = get_os(node)
@@ -170,16 +177,16 @@
         path = DataSource(suite_id=self.suite.storage_id,
                           job_id=job.storage_id,
                           node_id=node.node_id,
-                          dev='fio',
-                          sensor='stdout',
+                          sensor='fio',
+                          dev=None,
+                          metric='stdout',
                           tag='json')
-
         self.storage.put_extra(fio_out, path)
         node.conn.fs.unlink(self.remote_output_file)
 
         files = [name for name in node.conn.fs.listdir(self.exec_folder)]
         result = []
-        for name, file_path in get_log_files(cast(FioJobConfig, job)):
+        for name, file_path, units in get_log_files(cast(FioJobConfig, job)):
             log_files = [fname for fname in files if fname.startswith(file_path)]
             if len(log_files) != 1:
                 logger.error("Found %s files, match log pattern %s(%s) - %s",
@@ -196,8 +203,10 @@
                 logger.exception("Error during parse %s fio log file - can't decode usint UTF8", name)
                 raise StopTestError()
 
-            parsed = array.array('L' if name == 'lat' else 'Q')
-            times = array.array('Q')
+            # TODO: fix units, need to get array type from stream
+
+            parsed = []  # type: List[Union[List[int], int]]
+            times = []  # type: List[int]
 
             for idx, line in enumerate(log_data):
                 line = line.strip()
@@ -214,19 +223,23 @@
                                              .format(expected_lat_bins, len(vals), time_ms_s))
                                 raise StopTestError()
 
-                            parsed.extend(vals)
+                            parsed.append(vals)
                         else:
                             parsed.append(int(val_s.strip()))
                     except ValueError:
                         logger.exception("Error during parse %s fio log file in line %s: %r", name, idx, line)
                         raise StopTestError()
 
+            if not self.suite.keep_raw_files:
+                raw_result = None
+
             result.append(TimeSeries(name=name,
                                      raw=raw_result,
-                                     second_axis_size=expected_lat_bins if name == 'lat' else 1,
-                                     data=parsed,
-                                     times=times,
-                                     source=path(sensor=name, tag=None)))
+                                     data=numpy.array(parsed, dtype='uint64'),
+                                     units=units,
+                                     times=numpy.array(times, dtype='uint64'),
+                                     time_units='ms',
+                                     source=path(metric=name, tag='csv')))
         return result
 
     def format_for_console(self, data: Any) -> str:
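
Note on the log parsing above: for 'lat' logs each line contributes a whole histogram (one list of bin counts per sample), while 'bw'/'iops' logs contribute a single integer per sample, so the final numpy.array() call produces a 2D uint64 array for latencies and a 1D array otherwise. A minimal standalone sketch with made-up data (3 bins instead of the real expected_lat_bins):

    import numpy

    lat_lines = [[1, 2, 3], [4, 5, 6]]   # two samples of a 3-bin latency histogram
    bw_lines = [120, 115, 130]           # one bandwidth value per sample, KiBps

    lat_arr = numpy.array(lat_lines, dtype='uint64')
    bw_arr = numpy.array(bw_lines, dtype='uint64')

    assert lat_arr.shape == (2, 3)
    assert bw_arr.shape == (3,)
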
diff --git a/wally/suits/io/fio_job.py b/wally/suits/io/fio_job.py
index 0f55e91..2d8d78a 100644
--- a/wally/suits/io/fio_job.py
+++ b/wally/suits/io/fio_job.py
@@ -53,15 +53,26 @@
     @property
     def long_summary(self) -> str:
         """Readable long summary for management and deployment engineers"""
-        res = "{0[sync_mode_long]} {0[oper]} {1}".format(self, b2ssize(self['bsize'] * 1024))
+        res = "{0[oper]}, {0.sync_mode_long}, block size {1}B".format(self, b2ssize(self['bsize'] * 1024))
         if self['qd'] is not None:
-            res += " QD = " + str(self['qd'])
+            res += ", QD = " + str(self['qd'])
         if self['thcount'] not in (1, None):
-            res += " threads={0[thcount]}".format(self)
+            res += ", threads={0[thcount]}".format(self)
         if self['write_perc'] is not None:
-            res += " write_perc={0[write_perc]}%".format(self)
+            res += ", write_perc={0[write_perc]}%".format(self)
         return res
 
+    def copy(self, **kwargs: Any) -> 'FioJobParams':
+        np = self.params.copy()
+        np.update(kwargs)
+        return self.__class__(**np)
+
+    @property
+    def char_tpl(self) -> Tuple[Union[str, int], ...]:
+        mint = lambda x: -10000000000 if x is None else int(x)
+        return self['oper'], mint(self['bsize']), self['sync_mode'], \
+            mint(self['thcount']), mint(self['qd']), mint(self['write_perc'])
+
 
 class FioJobConfig(JobConfig):
     """Fio job configuration"""
@@ -157,7 +168,7 @@
         return len(list(self.required_vars())) == 0
 
     def __str__(self) -> str:
-        res = "[{0}]\n".format(self.params.summary)
+        res = "[{0}]\n".format(self.summary)
 
         for name, val in self.vals.items():
             if name.startswith('_') or name == name.upper():
@@ -180,4 +191,6 @@
     @classmethod
     def fromraw(cls, data: Dict[str, Any]) -> 'FioJobConfig':
         data['vals'] = OrderedDict(data['vals'])
+        data['_sync_mode'] = None
+        data['_params'] = None
         return cast(FioJobConfig, super().fromraw(data))
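
The new char_tpl property gives FioJobParams a deterministic sort/hash key; mint maps None to a large negative sentinel so missing fields order before any real value. A hedged illustration with made-up values:

    mint = lambda x: -10000000000 if x is None else int(x)

    # (oper, bsize, sync_mode, thcount, qd, write_perc)
    a = ('randread', mint(4), 'd', mint(None), mint(1), mint(None))
    b = ('randread', mint(4), 'd', mint(None), mint(16), mint(None))
    assert a < b  # QD=1 orders before QD=16; the None fields compare equal
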
diff --git a/wally/suits/io/fio_task_parser.py b/wally/suits/io/fio_task_parser.py
index c1b4bc3..bdcec23 100644
--- a/wally/suits/io/fio_task_parser.py
+++ b/wally/suits/io/fio_task_parser.py
@@ -288,19 +288,17 @@
     return fio_config_parse(fio_config_lexer(source, fname))
 
 
-def get_log_files(sec: FioJobConfig, iops: bool = False) -> List[Tuple[str, str]]:
-    res = []  # type: List[Tuple[str, str]]
+def get_log_files(sec: FioJobConfig, iops: bool = False) -> Iterator[Tuple[str, str, str]]:
 
-    keys = [('write_bw_log', 'bw'), ('write_hist_log', 'lat')]
+    keys = [('write_bw_log', 'bw', 'kibps'),
+            ('write_hist_log', 'lat', 'us')]
     if iops:
-        keys.append(('write_iops_log', 'iops'))
+        keys.append(('write_iops_log', 'iops', 'iops'))
 
-    for key, name in keys:
+    for key, name, units in keys:
         log = sec.vals.get(key)
         if log is not None:
-            res.append((name, log))
-
-    return res
+            yield (name, log, units)
 
 
 def fio_cfg_compile(source: str, fname: str, test_params: FioParams) -> Iterator[FioJobConfig]:
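
get_log_files now yields instead of returning a list; plain for-loops over the result keep working, but any caller that indexes or len()s the result needs list(...) around the call. A self-contained toy version of the same pattern (not the real function, which takes a FioJobConfig):

    from typing import Dict, Iterator, Tuple

    def get_log_files_demo(vals: Dict[str, str], iops: bool = False) -> Iterator[Tuple[str, str, str]]:
        keys = [('write_bw_log', 'bw', 'kibps'), ('write_hist_log', 'lat', 'us')]
        if iops:
            keys.append(('write_iops_log', 'iops', 'iops'))
        for key, name, units in keys:
            log = vals.get(key)
            if log is not None:
                yield name, log, units

    print(list(get_log_files_demo({'write_bw_log': 'bw_log'})))  # [('bw', 'bw_log', 'kibps')]
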
diff --git a/wally/suits/io/hdd.cfg b/wally/suits/io/hdd.cfg
index 95c8cec..eff64cd 100644
--- a/wally/suits/io/hdd.cfg
+++ b/wally/suits/io/hdd.cfg
@@ -1,44 +1,23 @@
 [global]
-include defaults.cfg
+include defaults_qd.cfg
 
-# NUMJOBS={% 1, 5, 10, 15, 20, 30, 40, 80 %}
-
-NUMJOBS={% 1, 5, 10, 15, 25, 40 %}
-
-ramp_time=30
-runtime=120
-
+QD={% 1, 2, 4, 8, 16, 32, 64 %}
+runtime=300
 direct=1
 
 # ---------------------------------------------------------------------
-# check different thread count, sync mode. (latency, iops) = func(th_count)
+# check different queue depths, random read/write mode. (latency, iops) = func(QD)
 # ---------------------------------------------------------------------
 [hdd_{TEST_SUMM}]
 blocksize=4k
-rw=randwrite
-sync=1
-numjobs={NUMJOBS}
+rw={% randread, randwrite %}
+iodepth={QD}
 
 # ---------------------------------------------------------------------
-# check different thread count, direct read mode. (latency, iops) = func(th_count)
-# also check iops for randread
-# ---------------------------------------------------------------------
-[hdd_{TEST_SUMM}]
-blocksize=4k
-rw=randread
-numjobs={NUMJOBS}
-
-# ---------------------------------------------------------------------
-# No reason for th count > 1 in case of sequantial operations
+# No reason for QD > 1 in case of sequential operations
 # or they become random
 # ---------------------------------------------------------------------
 [hdd_{TEST_SUMM}]
 blocksize=1m
 rw={% read, write %}
-
-# ---------------------------------------------------------------------
-# check IOPS randwrite.
-# ---------------------------------------------------------------------
-[hdd_{TEST_SUMM}]
-blocksize=4k
-rw=randwrite
+iodepth=1
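
For context: the {% ... %} markers are cycle parameters that the task parser expands into one job per combination, so (assuming cross-product expansion over independent cycles) the 4k section now generates 2 rw modes x 7 queue depths = 14 jobs. A rough model of that expansion:

    from itertools import product

    qd_values = [1, 2, 4, 8, 16, 32, 64]
    rw_modes = ['randread', 'randwrite']
    jobs = [{'rw': rw, 'iodepth': qd} for rw, qd in product(rw_modes, qd_values)]
    assert len(jobs) == 14
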
diff --git a/wally/suits/itest.py b/wally/suits/itest.py
index ac9e1c1..9aa4ce6 100644
--- a/wally/suits/itest.py
+++ b/wally/suits/itest.py
@@ -2,13 +2,13 @@
 import time
 import logging
 import os.path
-from typing import Any, List, Optional, Callable, Tuple, Iterable, cast
+from typing import Any, List, Optional, Callable, Iterable, cast
 
-from concurrent.futures import ThreadPoolExecutor, wait, Future
+from concurrent.futures import ThreadPoolExecutor, wait
 
 from ..utils import StopTestError, get_time_interval_printable_info
 from ..node_interfaces import IRPCNode
-from ..result_classes import TestSuiteConfig, TestJobConfig, JobMetrics, TimeSeries, IResultStorage
+from ..result_classes import SuiteConfig, JobConfig, TimeSeries, IResultStorage
 
 
 logger = logging.getLogger("wally")
@@ -24,7 +24,7 @@
     retry_time = 30
     job_config_cls = None  # type: type
 
-    def __init__(self, storage: IResultStorage, suite: TestSuiteConfig, on_idle: Callable[[], None] = None) -> None:
+    def __init__(self, storage: IResultStorage, suite: SuiteConfig, on_idle: Callable[[], None] = None) -> None:
         self.suite = suite
         self.stop_requested = False
         self.sorted_nodes_ids = sorted(node.node_id for node in self.suite.nodes)
@@ -57,16 +57,16 @@
 
     def __init__(self, *args, **kwargs) -> None:
         PerfTest.__init__(self, *args, **kwargs)
-        self.job_configs = None  # type: List[TestJobConfig]
+        self.job_configs = None  # type: List[JobConfig]
 
     @abc.abstractmethod
-    def get_expected_runtime(self, iter_cfg: TestJobConfig) -> Optional[int]:
+    def get_expected_runtime(self, iter_cfg: JobConfig) -> Optional[int]:
         pass
 
-    def get_not_done_jobs(self) -> Iterable[TestJobConfig]:
+    def get_not_done_jobs(self) -> Iterable[JobConfig]:
         jobs_map = {job.storage_id: job for job in self.job_configs}
         already_in_storage = set()
-        for db_config in cast(List[TestJobConfig], self.storage.iter_job(self.suite)):
+        for db_config in cast(List[JobConfig], self.storage.iter_job(self.suite)):
             if db_config.storage_id in jobs_map:
                 job = jobs_map[db_config.storage_id]
                 if job != db_config:
@@ -113,7 +113,7 @@
             for job in not_in_storage:
                 results = []  # type: List[TimeSeries]
                 for retry_idx in range(self.max_retry):
-                    logger.debug("Prepare job %s", job.summary)
+                    logger.debug("Prepare job %s", job.params.summary)
 
                     # prepare nodes for new iterations
                     wait([pool.submit(self.prepare_iteration, node, job) for node in self.suite.nodes])
@@ -172,8 +172,7 @@
                                        self.name, job.summary,
                                        max_start_time - min_start_time, self.max_time_diff)
 
-                    job.reliable_info_starts_at = max_start_time
-                    job.reliable_info_stops_at = min_stop_time
+                    job.reliable_info_range = (int(max_start_time), int(min_stop_time))
 
                 self.storage.put_job(self.suite, job)
                 self.storage.sync()
@@ -186,11 +185,11 @@
         pass
 
     @abc.abstractmethod
-    def prepare_iteration(self, node: IRPCNode, job: TestJobConfig) -> None:
+    def prepare_iteration(self, node: IRPCNode, job: JobConfig) -> None:
         pass
 
     @abc.abstractmethod
-    def run_iteration(self, node: IRPCNode, job: TestJobConfig) -> List[TimeSeries]:
+    def run_iteration(self, node: IRPCNode, job: JobConfig) -> List[TimeSeries]:
         pass
 
 
@@ -204,7 +203,7 @@
         # TODO: fix job_configs field
         raise NotImplementedError("Fix job configs")
 
-    def get_expected_runtime(self, job: TestJobConfig) -> Optional[int]:
+    def get_expected_runtime(self, job: JobConfig) -> Optional[int]:
         return None
 
     def config_node(self, node: IRPCNode) -> None:
@@ -215,10 +214,10 @@
         cmd += ' ' + self.suite.params.get('prerun_opts', '')
         node.run(cmd, timeout=self.prerun_tout)
 
-    def prepare_iteration(self, node: IRPCNode, job: TestJobConfig) -> None:
+    def prepare_iteration(self, node: IRPCNode, job: JobConfig) -> None:
         pass
 
-    def run_iteration(self, node: IRPCNode, job: TestJobConfig) -> List[TimeSeries]:
+    def run_iteration(self, node: IRPCNode, job: JobConfig) -> List[TimeSeries]:
         # TODO: have to store logs
         cmd = self.join_remote(self.run_script)
         cmd += ' ' + self.suite.params.get('run_opts', '')
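
The reliable_info_range change above replaces two attributes with one tuple holding the window during which every node was running the job. The computation is just max-of-starts to min-of-stops, sketched here with hypothetical per-node timestamps:

    starts = [100.2, 101.7, 100.9]   # hypothetical per-node start times, seconds
    stops = [220.1, 219.4, 221.0]    # hypothetical per-node stop times, seconds

    reliable_info_range = (int(max(starts)), int(min(stops)))
    assert reliable_info_range == (101, 219)
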
diff --git a/wally/suits/job.py b/wally/suits/job.py
index ce32e0e..91822cb 100644
--- a/wally/suits/job.py
+++ b/wally/suits/job.py
@@ -1,5 +1,5 @@
 import abc
-from typing import Dict, Any, Tuple
+from typing import Dict, Any, Tuple, cast, Union
 from collections import OrderedDict
 
 from ..common_types import Storable
@@ -24,6 +24,10 @@
         """Readable long summary for management and deployment engineers"""
         pass
 
+    @abc.abstractmethod
+    def copy(self, **updated) -> 'JobParams':
+        pass
+
     def __getitem__(self, name: str) -> Any:
         return self.params[name]
 
@@ -31,10 +35,21 @@
         self.params[name] = val
 
     def __hash__(self) -> int:
-        return hash(tuple(sorted(self.params.items())))
+        return hash(self.char_tpl)
 
-    def __eq__(self, o: 'JobParams') -> bool:
-        return sorted(self.params.items()) == sorted(o.params.items())
+    def __eq__(self, o: object) -> bool:
+        if not isinstance(o, self.__class__):
+            raise TypeError("Can't compare {!r} to {!r}".format(self.__class__.__qualname__, type(o).__qualname__))
+        return sorted(self.params.items()) == sorted(cast(JobParams, o).params.items())
+
+    def __lt__(self, o: object) -> bool:
+        if not isinstance(o, self.__class__):
+            raise TypeError("Can't compare {!r} to {!r}".format(self.__class__.__qualname__, type(o).__qualname__))
+        return self.char_tpl < cast(JobParams, o).char_tpl
+
+    @abc.abstractproperty
+    def char_tpl(self) -> Tuple[Union[str, int, float, bool], ...]:
+        pass
 
 
 class JobConfig(Storable, metaclass=abc.ABCMeta):
@@ -45,7 +60,7 @@
         self.idx = idx
 
         # time interval, in seconds, when test was running on all nodes
-        self.reliable_info_time_range = None  # type: Tuple[int, int]
+        self.reliable_info_range = None  # type: Tuple[int, int]
 
         # all job parameters, both from suite file and config file
         self.vals = OrderedDict()  # type: Dict[str, Any]
@@ -53,9 +68,13 @@
     @property
     def storage_id(self) -> str:
         """unique string, used as key in storage"""
-        return "{}_{}".format(self.params.summary, self.idx)
+        return "{}_{}".format(self.summary, self.idx)
 
     @abc.abstractproperty
     def params(self) -> JobParams:
         """Should return a copy"""
         pass
+
+    @property
+    def summary(self) -> str:
+        return self.params.summary
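
With __hash__, __eq__ and __lt__ all routed through char_tpl, JobParams instances of the same subclass hash, compare, and sort by one key, while cross-class comparison raises TypeError instead of silently returning False. A self-contained toy of that contract (not the real class hierarchy):

    class ToyParams:
        def __init__(self, tpl): self.char_tpl = tpl
        def __hash__(self): return hash(self.char_tpl)
        def __eq__(self, o): return self.char_tpl == o.char_tpl
        def __lt__(self, o): return self.char_tpl < o.char_tpl

    jobs = [ToyParams(('write', 4096)), ToyParams(('read', 4096))]
    assert sorted(jobs)[0].char_tpl == ('read', 4096)
    assert len({ToyParams(('read', 4096)), ToyParams(('read', 4096))}) == 1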