Fix bugs and add text report
diff --git a/configs-examples/v2_default.yaml b/configs-examples/default.yaml
similarity index 76%
rename from configs-examples/v2_default.yaml
rename to configs-examples/default.yaml
index 931d101..c3614ba 100644
--- a/configs-examples/v2_default.yaml
+++ b/configs-examples/default.yaml
@@ -17,14 +17,18 @@
     vms:
         - "USERNAME[:PASSWD]@VM_NAME_PREFIX[::KEY_FILE]"
 
+# nodes: map of explicit node URLs to node roles,
+# in the format
+#    USERNAME[:PASSWD]@VM_NAME_PREFIX[::KEY_FILE] or localhost: role1, role2, role3, ...
 
 collect_info: true
 var_dir_root: /tmp/perf_tests
 settings_dir: ~/.wally
-
-logging:
-    extra_logs: 1
-    level: DEBUG
+connect_timeout: 30
+rpc_log_level: DEBUG
+include: logging.yaml
+default_test_local_folder: "/tmp/wally_{name}_{uuid}"
+keep_raw_files: true
 
 vm_configs:
     keypair_file_private: wally_vm_key_perf3.pem
@@ -50,9 +54,11 @@
 
 
 ceph: nodeep-scrub, noscrub
-
 #-----------------------------------------    STEPS   ------------------------------------------------------------------
-# discover: ...
+# discover: a,b,c,... - comma-separated list of clusters to discover. May be omitted.
+#    The list may contain: ceph, openstack, fuel
+#    Also: ignore_errors - ignore errors during discovery
+#          metadata - discover cluster metadata only, not the nodes
 # spawn: ...
 # connect: ...
 # sensors: ...
@@ -64,9 +70,9 @@
        testnode: system-cpu, block-io, net-io
        ceph-osd: system-cpu, block-io, net-io, ceph
        compute:
-            system-cpu: *
-            block-io: sd*
-            net-io: *
+            system-cpu: "*"
+            block-io: "sd*"
+            net-io: "*"
 
 #----------------------------------   TEST PROFILES --------------------------------------------------------------------
 profiles:
@@ -85,21 +91,21 @@
                 load: ceph
                 params:
                     FILENAME: /dev/vdb
-                    TEST_FILE_SIZE: AUTO
+                    FILESIZE: AUTO
 
         cinder_iscsi_vdb:
             - io:
                 load: cinder_iscsi
                 params:
                     FILENAME: /dev/vdb
-                    TEST_FILE_SIZE: AUTO
+                    FILESIZE: AUTO
 
         nova_io:
             - io:
                 load: hdd
                 params:
                     FILENAME: /dev/vdb
-                    TEST_FILE_SIZE: AUTO
+                    FILESIZE: AUTO
 
     openstack_ceph: OS_1_to_1 + ceph_vdb
     openstack_cinder: OS_1_to_1 + ceph_iscsi_vdb
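
Note: the discover option above is consumed as a plain comma-separated string; main.py (later in this patch) splits it into a set, and each discovery stage only tests membership. A minimal sketch of that flow (strip() is added here for robustness; the patch itself splits without stripping):

    # Sketch of the new discovery-flag handling; mirrors wally/main.py and
    # the membership checks in wally/ceph.py and wally/fuel.py below.
    def parse_discovery(raw: str) -> set:
        return {item.strip() for item in raw.split(",") if item.strip()}

    discovery = parse_discovery("ceph,ignore_errors")
    run_ceph = 'ceph' in discovery                 # enables ceph discovery
    ignore_errors = 'ignore_errors' in discovery   # don't abort on failures
    metadata_only = 'metadata' in discovery        # metadata only, no nodes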
diff --git a/configs-examples/local_hdd.yaml b/configs-examples/local_hdd.yaml
new file mode 100644
index 0000000..39af205
--- /dev/null
+++ b/configs-examples/local_hdd.yaml
@@ -0,0 +1,13 @@
+include: default.yaml
+run_sensors: true
+results_storage: /var/wally_results
+
+nodes:
+    localhost: testnode
+
+tests:
+  - fio:
+      load: verify
+      params:
+          FILENAME: /dev/rbd0
+          FILESIZE: 4G
diff --git a/configs-examples/logging.yaml b/configs-examples/logging.yaml
new file mode 100644
index 0000000..e56c6c7
--- /dev/null
+++ b/configs-examples/logging.yaml
@@ -0,0 +1,26 @@
+logging:
+    version: 1
+    disable_existing_loggers: true
+    formatters:
+        simple:
+            format: "%(asctime)s - %(levelname)s - %(message)s"
+            datefmt: "%H:%M:%S"
+    handlers:
+        console:
+            level: INFO
+            class: logging.StreamHandler
+            formatter: simple
+            stream: "ext://sys.stdout"
+        log_file:
+            level: DEBUG
+            class: logging.FileHandler
+            formatter: simple
+            filename: null
+    loggers:
+        cmd:     {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        storage: {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        rpc:     {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        cephlib: {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        collect: {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        agent:   {"level": "DEBUG", "handlers": ["console", "log_file"]}
+        wally:   {"level": "DEBUG", "handlers": ["console", "log_file"]}
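
Note: logging.yaml is a stock logging.config.dictConfig document; filename is left null on the log_file handler and is filled in at run time - main.py passes storage.get_fname('log') to cephlib's setup_logging. A hedged sketch of that injection (illustrative only; the real implementation lives in cephlib):

    import logging.config

    def apply_log_config(log_config_obj: dict, log_file: str) -> None:
        # Patch the placeholder left as null in logging.yaml, then apply.
        # Assumption: cephlib's setup_logging does something equivalent.
        log_config_obj["handlers"]["log_file"]["filename"] = log_file
        logging.config.dictConfig(log_config_obj)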
diff --git a/configs-examples/v2_user.yaml b/configs-examples/openstack_ceph.yaml
similarity index 95%
rename from configs-examples/v2_user.yaml
rename to configs-examples/openstack_ceph.yaml
index 32e5a6d..efd51ff 100644
--- a/configs-examples/v2_user.yaml
+++ b/configs-examples/openstack_ceph.yaml
@@ -1,4 +1,4 @@
-include: v2_default.yaml
+include: default.yaml
 discover: openstack,fuel_openrc_only
 run_sensors: true
 results_storage: /var/wally_results
diff --git a/fio_binaries/fio_yakkety_x86_64.bz2 b/fio_binaries/fio_yakkety_x86_64.bz2
new file mode 100644
index 0000000..0a11441
--- /dev/null
+++ b/fio_binaries/fio_yakkety_x86_64.bz2
Binary files differ
diff --git a/requirements.txt b/requirements.txt
index ea91948..004d52d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,6 @@
 PyYAML
 requests
 simplejson
-texttable
 pycrypto
 ecdsa
 psutil
diff --git a/requirements_extra.txt b/requirements_extra.txt
index a2a3828..6551654 100644
--- a/requirements_extra.txt
+++ b/requirements_extra.txt
@@ -1,2 +1,6 @@
 oktest
-iso8601==0.1.10
\ No newline at end of file
+iso8601==0.1.10
+scipy
+numpy
+matplotlib
+psutil
diff --git a/scripts/install.sh b/scripts/install.sh
index 3dfeef7..f3a87b1 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -7,20 +7,11 @@
 popd > /dev/null
 
 function install_apt() {
-    MODULES="python-openssl  python-faulthandler python-pip"
-    if [ "$FULL" == "--full" ] ; then
-        MODULES="$MODULES python-scipy python-numpy python-matplotlib python-psutil"
-    fi
-    apt-get install -y $MODULES
+    apt-get install -y python-openssl python-pip
 }
 
-
 function install_yum() {
-    MODULES="pyOpenSSL python-pip python-ecdsa"
-    if [ "$FULL" == "--full" ] ; then
-        MODULES="$MODULES scipy numpy python-matplotlib python-psutil"
-    fi
-    yum -y install $MODULES
+    yum -y install pyOpenSSL python-pip python-ecdsa
 }
 
 if which apt-get >/dev/null; then
diff --git a/wally/ceph.py b/wally/ceph.py
index 9734baa..36d6b15 100644
--- a/wally/ceph.py
+++ b/wally/ceph.py
@@ -40,15 +40,19 @@
     def run(self, ctx: TestRun) -> None:
         """Return list of ceph's nodes NodeInfo"""
 
-        discovery = ctx.config.get("discovery")
-        if discovery == 'disable' or discovery == 'metadata':
-            logger.info("Skip ceph discovery due to config setting")
+        if 'ceph' not in ctx.config.discovery:
+            logger.debug("Skip ceph discovery due to config setting")
             return
 
         if 'all_nodes' in ctx.storage:
             logger.debug("Skip ceph discovery, use previously discovered nodes")
             return
 
+        if 'metadata' in ctx.config.discovery:
+            logger.error("Ceph metadata discovery is not implemented")
+            raise StopTestError()
+
+        ignore_errors = 'ignore_errors' in ctx.config.discovery
         ceph = ctx.config.ceph
         root_node_uri = cast(str, ceph.root_node)
         cluster = ceph.get("cluster", "ceph")
@@ -94,7 +98,7 @@
 
                 logger.debug("Found %s nodes with ceph-osd role", len(ips))
             except Exception as exc:
-                if discovery != 'ignore_errors':
+                if not ignore_errors:
                     logger.exception("OSD discovery failed")
                     raise StopTestError()
                 else:
@@ -109,7 +113,7 @@
                     info.params['ceph'] = ceph_params
                 logger.debug("Found %s nodes with ceph-mon role", counter + 1)
             except Exception as exc:
-                if discovery != 'ignore_errors':
+                if not ignore_errors:
                     logger.exception("MON discovery failed")
                     raise StopTestError()
                 else:
diff --git a/wally/config.py b/wally/config.py
index 2178d0c..add4df5 100644
--- a/wally/config.py
+++ b/wally/config.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, Set
 
 from .common_types import IStorable
 
@@ -33,6 +33,7 @@
         self.fuel = None  # type: 'Config'
         self.test = None  # type: 'Config'
         self.sensors = None  # type: 'Config'
+        self.discovery = None  # type: Set[str]
 
         self._dct.clear()
         self._dct.update(dct)
diff --git a/wally/console_report.py b/wally/console_report.py
new file mode 100644
index 0000000..bfa8881
--- /dev/null
+++ b/wally/console_report.py
@@ -0,0 +1,40 @@
+import numpy
+
+from cephlib.common import float2str
+
+from . import texttable
+from .hlstorage import ResultStorage
+from .stage import Stage, StepOrder
+from .test_run_class import TestRun
+from .suits.io.fio import FioTest
+from .statistic import calc_norm_stat_props, calc_histo_stat_props
+from .suits.io.fio_hist import get_lat_vals
+
+class ConsoleReportStage(Stage):
+
+    priority = StepOrder.REPORT
+
+    def run(self, ctx: TestRun) -> None:
+        rstorage = ResultStorage(ctx.storage)
+        for suite in rstorage.iter_suite(FioTest.name):
+            table = texttable.Texttable(max_width=200)
+
+            table.header(["Description", "IOPS ~ Dev", "Skew/Kurt", "lat med, ms", "lat 95, ms"])
+            table.set_cols_align(('l', 'r', 'r', 'r', 'r'))
+
+            for job in sorted(rstorage.iter_job(suite), key=lambda job: job.params):
+                bw_ts, = list(rstorage.iter_ts(suite, job, metric='bw'))
+                props = calc_norm_stat_props(bw_ts)
+                avg_iops = props.average // job.params.params['bsize']
+                iops_dev = props.deviation // job.params.params['bsize']
+
+                lat_ts, = list(rstorage.iter_ts(suite, job, metric='lat'))
+                bins_edges = numpy.array(get_lat_vals(lat_ts.data.shape[1]), dtype='float32') / 1000  # convert us to ms
+                lat_props = calc_histo_stat_props(lat_ts, bins_edges)
+
+                table.add_row([job.params.summary,
+                               "{} ~ {}".format(float2str(avg_iops), float2str(iops_dev)),
+                               "{}/{}".format(float2str(props.skew), float2str(props.kurt)),
+                               float2str(lat_props.perc_50), float2str(lat_props.perc_95)])
+
+            print(table.draw())
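
Note: the IOPS column above is derived from the bandwidth series: with bandwidth in KiB/s and bsize in KiB (units assumed here), IOPS is plain division by block size. A worked example:

    # Bandwidth -> IOPS conversion used in ConsoleReportStage
    # (units are an assumption: bw in KiB/s, bsize in KiB).
    avg_bw = 40960           # average of the 'bw' time series
    bsize = 4                # block size of a 4k job
    avg_iops = avg_bw // bsize
    assert avg_iops == 10240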
diff --git a/wally/fuel.py b/wally/fuel.py
index 37a5f5e..a565805 100644
--- a/wally/fuel.py
+++ b/wally/fuel.py
@@ -37,9 +37,12 @@
         pass
 
     def run(self, ctx: TestRun) -> None:
-        discovery = ctx.config.get("discovery")
-        if discovery == 'disable':
-            logger.info("Skip FUEL discovery due to config setting")
+        full_discovery = 'fuel' in ctx.config.discovery
+        metadata_only = (not full_discovery) and ('metadata' in ctx.config.discovery)
+        ignore_errors = 'ignore_errors' in ctx.config.discovery
+
+        if not (metadata_only or full_discovery):
+            logger.debug("Skip ceph discovery due to config setting")
             return
 
         if "fuel_os_creds" in ctx.storage and 'fuel_version' in ctx.storage:
@@ -49,7 +52,7 @@
             if 'all_nodes' in ctx.storage:
                 logger.debug("Skip FUEL nodes discovery, use data from DB")
                 return
-            elif discovery == 'metadata':
+            elif metadata_only:
                 logger.debug("Skip FUEL nodes  discovery due to discovery settings")
                 return
 
@@ -86,7 +89,7 @@
 
             ctx.storage.put(list(ctx.fuel_openstack_creds), "fuel_os_creds")
 
-        if discovery == 'metadata':
+        if metadata_only:
             logger.debug("Skip FUEL nodes  discovery due to discovery settings")
             return
 
@@ -97,12 +100,12 @@
                                  log_level=ctx.config.rpc_log_level)
         except AuthenticationException:
             msg = "FUEL nodes discovery failed - wrong FUEL master SSH credentials"
-            if discovery != 'ignore_errors':
+            if not ignore_errors:
                 raise StopTestError(msg)
             logger.warning(msg)
             return
         except Exception as exc:
-            if discovery != 'ignore_errors':
+            if not ignore_errors:
                 logger.exception("While connection to FUEL")
                 raise StopTestError("Failed to connect to FUEL")
             logger.warning("Failed to connect to FUEL - %s", exc)
diff --git a/wally/hlstorage.py b/wally/hlstorage.py
index 3672458..411b515 100644
--- a/wally/hlstorage.py
+++ b/wally/hlstorage.py
@@ -1,4 +1,5 @@
 import os
+import pprint
 import logging
 from typing import cast, Iterator, Tuple, Type, Dict, Optional, Any, List
 
@@ -27,14 +28,14 @@
 class DB_paths:
     suite_cfg_r = r'results/{suite_id}\.info\.yml'
 
-    job_root = r'results/{suite_id}.{job_id}/'
+    job_root = r'results/{suite_id}\.{job_id}/'
     job_cfg_r = job_root + r'info\.yml'
 
     # time series, data from load tool, sensor is a tool name
-    ts_r = job_root + r'{node_id}\.{sensor}\.{metric}.{tag}'
+    ts_r = job_root + r'{node_id}\.{sensor}\.{metric}\.{tag}'
 
     # statistica data for ts
-    stat_r = job_root + r'{node_id}\.{sensor}\.{metric}\.stat.yaml'
+    stat_r = job_root + r'{node_id}\.{sensor}\.{metric}\.stat\.yaml'
 
     # sensor data
     sensor_data_r = r'sensors/{node_id}_{sensor}\.{dev}\.{metric}\.csv'
@@ -89,11 +90,11 @@
                     return obj, header
 
             header = fd.readline().decode(self.csv_file_encoding).strip().split(",")
-            print("header =", header)
+
             if skip_shape:
                 header = header[1:]
             dt = fd.read().decode("utf-8").strip()
-            print(dt.split("\n")[0])
+
             arr = numpy.fromstring(dt.replace("\n", ','), sep=',', dtype=header[0])
             if len(dt) != 0:
                 lines = dt.count("\n") + 1
@@ -117,7 +118,7 @@
         else:
             vw = data
 
-        with self.storage.get_fd(path, "cb" if exists else "wb") as fd:
+        with self.storage.get_fd(path, "cb" if not exists else "rb+") as fd:
             if exists:
                 curr_header = fd.readline().decode(self.csv_file_encoding).rstrip().split(",")
                 assert header == curr_header, \
@@ -176,12 +177,13 @@
     def put_or_check_suite(self, suite: SuiteConfig) -> None:
         path = DB_paths.suite_cfg.format(suite_id=suite.storage_id)
         if path in self.storage:
-            db_cfg = self.storage.get(path)
+            db_cfg = self.storage.load(SuiteConfig, path)
             if db_cfg != suite:
                 logger.error("Current suite %s config is not equal to found in storage at %s", suite.test_type, path)
+                logger.debug("Current: \n%s\nStorage:\n%s", pprint.pformat(db_cfg), pprint.pformat(suite))
                 raise StopTestError()
-
-        self.storage.put(suite, path)
+        else:
+            self.storage.put(suite, path)
 
     def put_job(self, suite: SuiteConfig, job: JobConfig) -> None:
         path = DB_paths.job_cfg.format(suite_id=suite.storage_id, job_id=job.storage_id)
@@ -209,7 +211,7 @@
             self.storage.put_raw(ts.raw, raw_path)
 
     def put_extra(self, data: bytes, source: DataSource) -> None:
-        self.storage.put(data, DB_paths.ts.format(**source.__dict__))
+        self.storage.put_raw(data, DB_paths.ts.format(**source.__dict__))
 
     def put_stat(self, data: StatProps, source: DataSource) -> None:
         self.storage.put(data, DB_paths.stat.format(**source.__dict__))
@@ -266,6 +268,9 @@
         filters.update(suite_id=suite.storage_id, job_id=job.storage_id)
         ts_glob = fill_path(DB_paths.ts_r, **filters)
         for is_file, path, groups in self.iter_paths(ts_glob):
+            tag = groups["tag"]
+            if tag != 'csv':
+                continue
             assert is_file
             groups = groups.copy()
             groups.update(filters)
@@ -275,7 +280,7 @@
                             sensor=groups["sensor"],
                             dev=None,
                             metric=groups["metric"],
-                            tag=groups["tag"])
+                            tag=tag)
             yield self.load_ts(ds, path)
 
     def iter_sensors(self, node_id: str = None, sensor: str = None, dev: str = None, metric: str = None) -> \
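
Note: the DB_paths fixes above add backslashes that were missing before {job_id}, {tag} and stat.yaml; without them the dot acts as a regex wildcard when these templates are used for matching, so they can match the wrong storage paths. A quick demonstration of the failure mode:

    import re

    # Unescaped dot: matches any character, not just '.'.
    assert re.fullmatch(r"lat\.stat.yaml", "lat.stat.yaml")
    assert re.fullmatch(r"lat\.stat.yaml", "lat.stat_yaml")      # unwanted
    # Escaped form only matches the literal file name.
    assert re.fullmatch(r"lat\.stat\.yaml", "lat.stat.yaml")
    assert not re.fullmatch(r"lat\.stat\.yaml", "lat.stat_yaml")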
diff --git a/wally/logger.py b/wally/logger.py
index 9ebc425..d885ea9 100644
--- a/wally/logger.py
+++ b/wally/logger.py
@@ -48,43 +48,3 @@
         # restore record, as it will be used by other formatters
         record.__dict__ = orig
         return res
-
-
-def setup_loggers(def_level: int = logging.DEBUG,
-                  log_fname: str = None,
-                  log_fd: IO = None,
-                  config_file: str = None) -> None:
-
-    # TODO: need to better combine file with custom settings
-    if config_file is not None:
-        data = yaml.load(open(config_file).read())
-        logging.config.dictConfig(data)
-    else:
-        log_format = '%(asctime)s - %(levelname)8s - %(name)-10s - %(message)s'
-        colored_formatter = ColoredFormatter(log_format, datefmt="%H:%M:%S")
-
-        sh = logging.StreamHandler()
-        sh.setLevel(def_level)
-        sh.setFormatter(colored_formatter)
-
-        logger = logging.getLogger('wally')
-        logger.setLevel(logging.DEBUG)
-
-        root_logger = logging.getLogger()
-        root_logger.handlers = []
-        root_logger.addHandler(sh)
-        root_logger.setLevel(logging.DEBUG)
-
-        if log_fname or log_fd:
-            if log_fname:
-                handler = logging.FileHandler(log_fname)  # type: Optional[logging.Handler]
-            else:
-                handler = logging.StreamHandler(log_fd)
-
-            formatter = logging.Formatter(log_format, datefmt="%H:%M:%S")
-            handler.setFormatter(formatter)
-            handler.setLevel(logging.DEBUG)
-
-            root_logger.addHandler(handler)
-
-        logging.getLogger('paramiko').setLevel(logging.WARNING)
diff --git a/wally/main.py b/wally/main.py
index fa1a801..b931929 100644
--- a/wally/main.py
+++ b/wally/main.py
@@ -31,11 +31,12 @@
 except ImportError:
     faulthandler = None
 
+from cephlib.common import setup_logging
+
 from . import utils, node
 from .node_utils import log_nodes_statistic
 from .storage import make_storage, Storage
 from .config import Config
-from .logger import setup_loggers
 from .stage import Stage
 from .test_run_class import TestRun
 from .ssh import set_ssh_key_passwd
@@ -48,9 +49,10 @@
 from .run_test import (CollectInfoStage, ExplicitNodesStage, SaveNodesStage,
                        RunTestsStage, ConnectStage, SleepStage, PrepareNodes,
                        LoadStoredNodesStage)
-# from .process_results import CalcStatisticStage
-from .report import ConsoleReportStage, HtmlReportStage
+
+from .report import HtmlReportStage
 from .sensors import StartSensorsStage, CollectSensorsStage
+from .console_report import ConsoleReportStage
 
 
 logger = logging.getLogger("wally")
@@ -121,6 +123,8 @@
     # ---------------------------------------------------------------------
     report_help = 'run report on previously obtained results'
     report_parser = subparsers.add_parser('report', help=report_help)
+    report_parser.add_argument('-R', '--reporters', help="Comma-separated list of reporters: html,txt",
+                               default='html,txt')
     report_parser.add_argument("data_dir", help="folder with rest results")
 
     # ---------------------------------------------------------------------
@@ -134,17 +138,18 @@
 
     # ---------------------------------------------------------------------
     test_parser = subparsers.add_parser('test', help='run tests')
+    test_parser.add_argument("-d", '--dont-discover-nodes', action='store_true', help="Don't discover nodes")
+    test_parser.add_argument('-D', '--dont-collect', action='store_true', help="Don't collect cluster info")
+    test_parser.add_argument("-k", '--keep-vm', action='store_true', help="Don't remove test vm's")
+    test_parser.add_argument('-L', '--load-report', action='store_true', help="Create cluster load report")
+    test_parser.add_argument('-n', '--no-tests', action='store_true', help="Don't run tests")
+    test_parser.add_argument('-N', '--no-report', action='store_true', help="Skip report stages")
+    test_parser.add_argument('-r', '--result-dir', default=None, help="Save results to DIR", metavar="DIR")
+    test_parser.add_argument('-R', '--reporters', help="Comma-separated list of reporters: html,txt",
+                             default='html,txt')
     test_parser.add_argument('--build-description', type=str, default="Build info")
     test_parser.add_argument('--build-id', type=str, default="id")
     test_parser.add_argument('--build-type', type=str, default="GA")
-    test_parser.add_argument('--dont-collect', action='store_true', help="Don't collect cluster info")
-    test_parser.add_argument('-n', '--no-tests', action='store_true', help="Don't run tests")
-    test_parser.add_argument('--load-report', action='store_true')
-    test_parser.add_argument("-k", '--keep-vm', action='store_true', help="Don't remove test vm's")
-    test_parser.add_argument("-d", '--dont-discover-nodes', action='store_true',
-                             help="Don't connect/discover fuel nodes")
-    test_parser.add_argument('--no-report', action='store_true', help="Skip report stages")
-    test_parser.add_argument('--result-dir', default=None, help="Save results to DIR", metavar="DIR")
     test_parser.add_argument("comment", help="Test information")
     test_parser.add_argument("config_file", help="Yaml config file")
 
@@ -218,48 +223,6 @@
             PrepareNodes()]
 
 
-notebook_kern = """
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "from wally.storage import make_storage\n",
-    "from wally.hlstorage import ResultStorage\n"
-    "storage = make_storage(\"$STORAGE\", existing=True)\n",
-    "rstorage = ResultStorage(storage=storage)\n"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}"""
-
-
 def main(argv: List[str]) -> int:
     if faulthandler is not None:
         faulthandler.register(signal.SIGUSR1, all_threads=True)
@@ -273,7 +236,7 @@
 
     if opts.subparser_name == 'test':
         config = load_config(opts.config_file)
-        config.storage_url, config.run_uuid = utils.get_uniq_path_uuid(config.results_dir)
+        config.storage_url, config.run_uuid = utils.get_uniq_path_uuid(config.results_storage)
         config.comment = opts.comment
         config.keep_vm = opts.keep_vm
         config.no_tests = opts.no_tests
@@ -282,6 +245,7 @@
         config.build_description = opts.build_description
         config.build_type = opts.build_type
         config.settings_dir = get_config_path(config, opts.settings_dir)
+        config.discovery = set(config.get('discovery', '').split(","))
 
         storage = make_storage(config.storage_url)
         storage.put(config, 'config')
@@ -368,24 +332,17 @@
 
     report_stages = []  # type: List[Stage]
     if not getattr(opts, "no_report", False):
-        # report_stages.append(CalcStatisticStage())
-        # report_stages.append(ConsoleReportStage())
-        report_stages.append(HtmlReportStage())
+        reporters = opts.reporters.split(",")
+        assert len(set(reporters)) == len(reporters)
+        assert set(reporters).issubset({'txt', 'html'})
+        if 'txt' in reporters:
+            report_stages.append(ConsoleReportStage())
+        if 'html' in reporters:
+            report_stages.append(HtmlReportStage())
 
-    # log level is not a part of config
-    if opts.log_level is not None:
-        str_level = opts.log_level
-    else:
-        str_level = config.get('logging/log_level', 'INFO')
-
-    log_config_file = config.get('logging/config', None)
-
-    if log_config_file is not None:
-        log_config_file = find_cfg_file(log_config_file, opts.config_file)
-
-    setup_loggers(getattr(logging, str_level),
-                  log_fd=storage.get_fd('log', "w"),
-                  config_file=log_config_file)
+    log_config_obj = config.raw().get('logging')
+    assert isinstance(log_config_obj, dict) or log_config_obj is None, "Broken 'logging' option in config"
+    setup_logging(log_config_obj=log_config_obj, log_level=opts.log_level, log_file=storage.get_fname('log'))
 
     logger.info("All info would be stored into %r", config.storage_url)
 
@@ -406,6 +363,7 @@
     for stage in stages:
         if stage.config_block is not None:
             if stage.config_block not in ctx.config:
+                logger.debug("Skip stage %r, as config has no required block %r", stage.name(), stage.config_block)
                 continue
 
         cleanup_stages.append(stage)
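
Note: the reporters value from the new -R flag is validated above with two asserts (no duplicates, only txt/html). An equivalent standalone check with friendlier errors might look like this (a sketch, not the patch code):

    def parse_reporters(raw: str) -> list:
        reporters = raw.split(",")
        if len(set(reporters)) != len(reporters):
            raise ValueError("Duplicate reporter in {!r}".format(raw))
        unknown = set(reporters) - {"txt", "html"}
        if unknown:
            raise ValueError("Unknown reporters: " + ",".join(sorted(unknown)))
        return reporters

    assert parse_reporters("html,txt") == ["html", "txt"]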
diff --git a/wally/node.py b/wally/node.py
index 57ca701..a828492 100644
--- a/wally/node.py
+++ b/wally/node.py
@@ -6,10 +6,10 @@
 import logging
 import tempfile
 import subprocess
-from typing import Union, cast, Any, Optional, Tuple, Dict
+from typing import Union, cast, Optional, Tuple, Dict
 
 
-import agent
+from agent import agent
 import paramiko
 
 
@@ -282,7 +282,7 @@
         log_file = node.run("mktemp", nolog=True).strip()
         cmd = "{} {} --log-level={} server --listen-addr={}:{} --daemon --show-settings"
         cmd = cmd.format(python_cmd, code_file, log_level, ip, port) + " --stdout-file={}".format(log_file)
-        logger.info("Agent logs for node {} stored on node in file {}. Log level is {}".format(
+        logger.info("Agent logs for node {} stored on node in file {} log level is {}".format(
             node.node_id, log_file, log_level))
     else:
         cmd = "{} {} --log-level=CRITICAL server --listen-addr={}:{} --daemon --show-settings"
diff --git a/wally/openstack.py b/wally/openstack.py
index b7fbe31..b046656 100644
--- a/wally/openstack.py
+++ b/wally/openstack.py
@@ -105,6 +105,10 @@
         pass
 
     def run(self, ctx: TestRun) -> None:
+        if 'openstack' not in ctx.config.discovery:
+            logger.debug("Skip openstack discovery due to settings")
+            return
+
         if 'all_nodes' in ctx.storage:
             logger.debug("Skip openstack discovery, use previously discovered nodes")
             return
@@ -121,7 +125,7 @@
             user, password = os_nodes_auth.split(":")
             key_file = None
 
-        if ctx.config.discovery not in ('disabled', 'metadata'):
+        if 'metadata' not in ctx.config.discovery:
             services = ctx.os_connection.nova.services.list()  # type: List[Any]
             host_services_mapping = {}  # type: Dict[str, List[str]]
 
@@ -136,7 +140,7 @@
                 ctx.merge_node(creds, set(services))
             # TODO: log OS nodes discovery results
         else:
-            logger.info("Scip OS cluster discovery due to 'discovery' setting value")
+            logger.info("Skip OS cluster discovery due to 'discovery' setting value")
 
         private_key_path = get_vm_keypair_path(ctx.config)[0]
 
diff --git a/wally/report.py b/wally/report.py
index 68170ec..75b8028 100644
--- a/wally/report.py
+++ b/wally/report.py
@@ -1271,12 +1271,3 @@
         report_path = rstorage.put_report(report, "index.html")
         rstorage.put_report(css_file, "main.css")
         logger.info("Report is stored into %r", report_path)
-
-
-class ConsoleReportStage(Stage):
-
-    priority = StepOrder.REPORT
-
-    def run(self, ctx: TestRun) -> None:
-        # TODO(koder): load data from storage
-        raise NotImplementedError("...")
diff --git a/wally/run_test.py b/wally/run_test.py
index 3fd8e64..d5f7d19 100755
--- a/wally/run_test.py
+++ b/wally/run_test.py
@@ -139,7 +139,7 @@
             return
 
         for url, roles in ctx.config.get('nodes', {}).raw().items():
-            ctx.merge_node(ssh_utils.parse_ssh_uri(url), set(roles.split(",")))
+            ctx.merge_node(ssh_utils.parse_ssh_uri(url), set(role.strip() for role in roles.split(",")))
             logger.debug("Add node %s with roles %s", url, roles)
 
 
@@ -243,6 +243,10 @@
                 logger.error("No nodes found for test, skipping it.")
                 continue
 
+            if name not in all_suits:
+                logger.error("Test suite %r not found. Only suits [%s] available", name, ", ".join(all_suits))
+                raise StopTestError()
+
             test_cls = all_suits[name]
             remote_dir = ctx.config.default_test_local_folder.format(name=name, uuid=ctx.config.run_uuid)
             suite = SuiteConfig(test_cls.name,
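
Note: the strip() in the roles fix above matters because YAML node entries are typically written with a space after the comma; without it the role set silently contains ' ceph-osd' and role matching fails:

    roles = "testnode, ceph-osd"
    assert set(roles.split(",")) == {"testnode", " ceph-osd"}                  # old, broken
    assert {r.strip() for r in roles.split(",")} == {"testnode", "ceph-osd"}   # fixed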
diff --git a/wally/sensors.py b/wally/sensors.py
index c773b34..c19c350 100644
--- a/wally/sensors.py
+++ b/wally/sensors.py
@@ -7,10 +7,12 @@
 from . import utils
 from .test_run_class import TestRun
 from .result_classes import DataSource
-from . import sensors_rpc_plugin
 from .stage import Stage, StepOrder
 from .hlstorage import ResultStorage
 
+from cephlib import sensors_rpc_plugin
+
+
 plugin_fname = sensors_rpc_plugin.__file__.rsplit(".", 1)[0] + ".py"
 SENSORS_PLUGIN_CODE = open(plugin_fname, "rb").read()  # type: bytes
 
@@ -36,7 +38,8 @@
     "block-io.sectors_read": "B",
     "block-io.sectors_written": "B",
     "block-io.writes_completed": "",
-    "block-io.wtime": "ms"
+    "block-io.wtime": "ms",
+    "block-io.weighted_io_time": "ms"
 }
 
 
@@ -106,6 +109,7 @@
 
 def collect_sensors_data(ctx: TestRun, stop: bool = False):
     rstorage = ResultStorage(ctx.storage)
+    raw_skipped = False
     for node in ctx.nodes:
         node_id = node.node_id
         if node_id in ctx.sensors_run_on:
@@ -116,7 +120,14 @@
                 func = node.conn.sensors.get_updates
 
             # TODO: units should came along with data
-            for path, value in sensors_rpc_plugin.unpack_rpc_updates(func()):
+            # TODO: process raw sensors data
+
+            for path, value, is_parsed in sensors_rpc_plugin.unpack_rpc_updates(func()):
+                if not is_parsed:
+                    if not raw_skipped:
+                        logger.warning("Raw sensors data at path %r and, maybe, others are skipped", path)
+                    raw_skipped = True
+                    continue
                 if path == 'collected_at':
                     ds = DataSource(node_id=node_id, metric='collected_at')
                     units = 'us'
diff --git a/wally/statistic.py b/wally/statistic.py
index 7edff67..a256587 100644
--- a/wally/statistic.py
+++ b/wally/statistic.py
@@ -22,7 +22,7 @@
 dev = lambda x: math.sqrt(numpy.var(x, ddof=1))
 
 
-def calc_norm_stat_props(ts: TimeSeries, bins_count: int, confidence: float = 0.95) -> NormStatProps:
+def calc_norm_stat_props(ts: TimeSeries, bins_count: int = None, confidence: float = 0.95) -> NormStatProps:
     "Calculate statistical properties of array of numbers"
 
     # array.array has very basic support
@@ -50,8 +50,9 @@
         res.confidence = None
         res.confidence_level = None
 
-    res.bins_populations, res.bins_edges = numpy.histogram(data, bins=bins_count)
-    res.bins_edges = res.bins_edges[:-1]
+    if bins_count is not None:
+        res.bins_populations, res.bins_edges = numpy.histogram(data, bins=bins_count)
+        res.bins_edges = res.bins_edges[:-1]
 
     try:
         res.normtest = stats.mstats.normaltest(data)
@@ -123,7 +124,7 @@
 
 def calc_histo_stat_props(ts: TimeSeries,
                           bins_edges: numpy.array,
-                          rebins_count: int,
+                          rebins_count: int = None,
                           tail: float = 0.005) -> HistoStatProps:
     log_bins = False
     res = HistoStatProps(ts.data)
@@ -149,8 +150,12 @@
     res.max = bins_edges[non_zero[-1] + (1 if non_zero[-1] != len(bins_edges) else 0)]
 
     res.log_bins = False
-    res.bins_populations, res.bins_edges = rebin_histogram(aggregated, bins_edges, rebins_count,
-                                                           left_tail_idx, right_tail_idx)
+    if rebins_count is not None:
+        res.bins_populations, res.bins_edges = rebin_histogram(aggregated, bins_edges, rebins_count,
+                                                               left_tail_idx, right_tail_idx)
+    else:
+        res.bins_populations = aggregated
+        res.bins_edges = bins_edges.copy()
 
     return res
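
Note: making bins_count and rebins_count optional lets the new console report request statistics without any histogram rebinning. The histogram branch itself is unchanged; for reference, this is the numpy idiom it relies on (populations plus left bin edges):

    import numpy

    data = numpy.random.lognormal(mean=3.0, sigma=0.5, size=1000)
    bins_populations, bins_edges = numpy.histogram(data, bins=20)
    bins_edges = bins_edges[:-1]          # keep left edges only, as above
    assert len(bins_populations) == len(bins_edges) == 20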
 
diff --git a/wally/storage.py b/wally/storage.py
index ab52e12..0459305 100644
--- a/wally/storage.py
+++ b/wally/storage.py
@@ -52,6 +52,10 @@
         pass
 
     @abc.abstractmethod
+    def get_fname(self, path: str) -> str:
+        pass
+
+    @abc.abstractmethod
     def sub_storage(self, path: str) -> 'ISimpleStorage':
         pass
 
@@ -141,6 +145,9 @@
     def __contains__(self, path: str) -> bool:
         return os.path.exists(self.j(path))
 
+    def get_fname(self, path: str) -> str:
+        return self.j(path)
+
     def get_fd(self, path: str, mode: str = "rb+") -> IO[bytes]:
         jpath = self.j(path)
 
@@ -281,6 +288,9 @@
     def get_fd(self, path: str, mode: str = "r") -> IO:
         return self.sstorage.get_fd(path, mode)
 
+    def get_fname(self, path: str) -> str:
+        return self.sstorage.get_fname(path)
+
     def load_list(self, obj_class: Type[ObjClass], *path: str) -> List[ObjClass]:
         path_s = "/".join(path)
         raw_val = cast(List[Dict[str, Any]], self.get(path_s))
diff --git a/wally/suits/io/fio.py b/wally/suits/io/fio.py
index 77d7a75..16da091 100644
--- a/wally/suits/io/fio.py
+++ b/wally/suits/io/fio.py
@@ -168,8 +168,12 @@
                                    job_file=self.remote_task_file)
         must_be_empty = node.run(cmd, timeout=exec_time + max(300, exec_time), check_timeout=1).strip()
 
-        if must_be_empty:
-            logger.error("Unexpected fio output: %r", must_be_empty)
+        for line in must_be_empty.split("\n"):
+            if line.strip():
+                if 'only root may flush block devices' in line:
+                    continue
+                logger.error("Unexpected fio output: %r", must_be_empty)
+                break
 
         # put fio output into storage
         fio_out = node.get_file_content(self.remote_output_file)
diff --git a/wally/suits/io/fio_job.py b/wally/suits/io/fio_job.py
index 2d8d78a..39715ef 100644
--- a/wally/suits/io/fio_job.py
+++ b/wally/suits/io/fio_job.py
@@ -41,7 +41,7 @@
     @property
     def summary(self) -> str:
         """Test short summary, used mostly for file names and short image description"""
-        res = "{0[oper]}{0[sync_mode]}{0[bsize]}".format(self)
+        res = "{0[oper_short]}{0[sync_mode]}{0[bsize]}".format(self)
         if self['qd'] is not None:
             res += "_qd" + str(self['qd'])
         if self['thcount'] not in (1, None):
@@ -107,7 +107,7 @@
 
     @property
     def qd(self) -> int:
-        return int(self.vals['iodepth'])
+        return int(self.vals.get('iodepth', '1'))
 
     @property
     def bsize(self) -> int:
@@ -142,6 +142,7 @@
     def params(self) -> JobParams:
         if self._params is None:
             self._params = dict(oper=self.oper,
+                                oper_short=self.op_type_short,
                                 sync_mode=self.sync_mode,
                                 bsize=self.bsize,
                                 qd=self.qd,
@@ -154,7 +155,7 @@
     def __eq__(self, o: object) -> bool:
         if not isinstance(o, FioJobConfig):
             return False
-        return self.vals == cast(FioJobConfig, o).vals
+        return dict(self.vals) == dict(cast(FioJobConfig, o).vals)
 
     def copy(self) -> 'FioJobConfig':
         return copy.deepcopy(self)
diff --git a/wally/suits/io/rpc_plugin.py b/wally/suits/io/rpc_plugin.py
index 5f5cfb5..39ed5cc 100644
--- a/wally/suits/io/rpc_plugin.py
+++ b/wally/suits/io/rpc_plugin.py
@@ -14,9 +14,8 @@
 
 
 # TODO: fix this in case if file is block device
-def check_file_prefilled(path, used_size_mb):
+def check_file_prefilled(path, used_size_mb, blocks_to_check=16):
     used_size = used_size_mb * 1024 ** 2
-    blocks_to_check = 16
 
     try:
         fstats = os.stat(path)
@@ -25,10 +24,8 @@
     except EnvironmentError:
         return False
 
-    offsets = [random.randrange(used_size - 1024) for _ in range(blocks_to_check)]
-    offsets.append(used_size - 1024)
-    offsets.append(0)
-
+    offsets = [0, used_size - 1024] + [random.randrange(used_size - 1024) for _ in range(blocks_to_check)]
+    logger.debug(str(offsets))
     with open(path, 'rb') as fd:
         for offset in offsets:
             fd.seek(offset)
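
Note: check_file_prefilled now probes a deterministic set of offsets plus random samples - the first and the last 1 KiB block of the used area, and blocks_to_check random positions. The sampling in isolation:

    import random

    used_size = 16 * 1024 ** 2          # used_size_mb * 1024 ** 2 for 16 MiB
    blocks_to_check = 16
    offsets = [0, used_size - 1024] + [random.randrange(used_size - 1024)
                                       for _ in range(blocks_to_check)]
    # Both ends of the file are always checked, plus 16 random 1 KiB blocks.
    assert len(offsets) == blocks_to_check + 2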
diff --git a/wally/suits/io/verify.cfg b/wally/suits/io/verify.cfg
index 75ab2b4..58a94b0 100644
--- a/wally/suits/io/verify.cfg
+++ b/wally/suits/io/verify.cfg
@@ -1,18 +1,19 @@
 [global]
-include defaults.cfg
-
-size={TEST_FILE_SIZE}
-ramp_time=0
-runtime=15
+include defaults_qd.cfg
+QD={% 1, 2, 4 %}
+runtime=30
+direct=1
 
 # ---------------------------------------------------------------------
-# [verify_{TEST_SUMM}]
-# blocksize=4k
-# rw=randwrite
-# direct=1
 
 [verify_{TEST_SUMM}]
 blocksize=4k
 rw=randwrite
 direct=1
-sync=1
+iodepth={QD}
+
+# [verify_{TEST_SUMM}]
+# blocksize=4k
+# rw=randwr
+# direct=1
+# sync=1
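
Note: the QD={% 1, 2, 4 %} line uses wally's cycle syntax: the job section is expanded once per listed value, so this file now yields three jobs with iodepth 1, 2 and 4. A hedged sketch of such an expansion (illustrative only; the real parser lives in wally's fio config code):

    def expand_qd(template: dict, qd_values) -> list:
        # One job per cycled QD value - illustrates the {% ... %} syntax.
        return [dict(template, iodepth=qd) for qd in qd_values]

    jobs = expand_qd({"blocksize": "4k", "rw": "randwrite", "direct": 1}, [1, 2, 4])
    assert [j["iodepth"] for j in jobs] == [1, 2, 4]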
diff --git a/wally/suits/job.py b/wally/suits/job.py
index 91822cb..1e4c457 100644
--- a/wally/suits/job.py
+++ b/wally/suits/job.py
@@ -14,12 +14,14 @@
     def __init__(self, **params: Dict[str, Any]) -> None:
         self.params = params
 
-    @abc.abstractproperty
+    @property
+    @abc.abstractmethod
     def summary(self) -> str:
         """Test short summary, used mostly for file names and short image description"""
         pass
 
-    @abc.abstractproperty
+    @property
+    @abc.abstractmethod
     def long_summary(self) -> str:
         """Readable long summary for management and deployment engineers"""
         pass
@@ -47,7 +49,8 @@
             raise TypeError("Can't compare {!r} to {!r}".format(self.__class__.__qualname__, type(o).__qualname__))
         return self.char_tpl < cast(JobParams, o).char_tpl
 
-    @abc.abstractproperty
+    @property
+    @abc.abstractmethod
     def char_tpl(self) -> Tuple[Union[str, int, float, bool], ...]:
         pass
 
@@ -70,7 +73,8 @@
         """unique string, used as key in storage"""
         return "{}_{}".format(self.summary, self.idx)
 
-    @abc.abstractproperty
+    @property
+    @abc.abstractmethod
     def params(self) -> JobParams:
         """Should return a copy"""
         pass
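
Note: the stacked @property/@abc.abstractmethod pair replaces abc.abstractproperty, which has been deprecated since Python 3.3; the stacked form is the supported spelling and behaves the same. A self-contained illustration:

    import abc

    class Base(metaclass=abc.ABCMeta):
        @property
        @abc.abstractmethod
        def summary(self) -> str:
            """Concrete subclasses must provide this as a property."""

    class Impl(Base):
        @property
        def summary(self) -> str:
            return "4k_randwrite_qd32"

    assert Impl().summary == "4k_randwrite_qd32"
    # Instantiating Base() would raise TypeError: 'summary' is abstract.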
diff --git a/wally/texttable.py b/wally/texttable.py
new file mode 100644
index 0000000..1917663
--- /dev/null
+++ b/wally/texttable.py
@@ -0,0 +1,586 @@
+# texttable - module for creating simple ASCII tables
+# Copyright (C) 2003-2015 Gerome Fournier <jef(at)foutaise.org>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+"""module for creating simple ASCII tables
+
+
+Example:
+
+    table = Texttable()
+    table.set_cols_align(["l", "r", "c"])
+    table.set_cols_valign(["t", "m", "b"])
+    table.add_rows([["Name", "Age", "Nickname"],
+                    ["Mr\\nXavier\\nHuon", 32, "Xav'"],
+                    ["Mr\\nBaptiste\\nClement", 1, "Baby"],
+                    ["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
+    print table.draw() + "\\n"
+
+    table = Texttable()
+    table.set_deco(Texttable.HEADER)
+    table.set_cols_dtype(['t',  # text
+                          'f',  # float (decimal)
+                          'e',  # float (exponent)
+                          'i',  # integer
+                          'a']) # automatic
+    table.set_cols_align(["l", "r", "r", "r", "l"])
+    table.add_rows([["text",    "float", "exp", "int", "auto"],
+                    ["abcd",    "67",    654,   89,    128.001],
+                    ["efghijk", 67.5434, .654,  89.6,  12800000000000000000000.00023],
+                    ["lmn",     5e-78,   5e-78, 89.4,  .000000000000128],
+                    ["opqrstu", .023,    5e+78, 92.,   12800000000000000000000]])
+    print table.draw()
+
+Result:
+
+    +----------+-----+----------+
+    |   Name   | Age | Nickname |
+    +==========+=====+==========+
+    | Mr       |     |          |
+    | Xavier   |  32 |          |
+    | Huon     |     |   Xav'   |
+    +----------+-----+----------+
+    | Mr       |     |          |
+    | Baptiste |   1 |          |
+    | Clement  |     |   Baby   |
+    +----------+-----+----------+
+    | Mme      |     |   Lou    |
+    | Louise   |  28 |          |
+    | Bourgeau |     |   Loue   |
+    +----------+-----+----------+
+
+    text   float       exp      int     auto
+    ===========================================
+    abcd   67.000   6.540e+02   89    128.001
+    efgh   67.543   6.540e-01   90    1.280e+22
+    ijkl   0.000    5.000e-78   89    0.000
+    mnop   0.023    5.000e+78   92    1.280e+22
+"""
+
+from __future__ import division
+
+__all__ = ["Texttable", "ArraySizeError"]
+
+__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
+__license__ = 'LGPL'
+__version__ = '0.8.8'
+__credits__ = """\
+Jeff Kowalczyk:
+    - textwrap improved import
+    - comment concerning header output
+
+Anonymous:
+    - add_rows method, for adding rows in one go
+
+Sergey Simonenko:
+    - redefined len() function to deal with non-ASCII characters
+
+Roger Lew:
+    - columns datatype specifications
+
+Brian Peterson:
+    - better handling of unicode errors
+
+Frank Sachsenheim:
+    - add Python 2/3-compatibility
+
+Maximilian Hils:
+    - fix minor bug for Python 3 compatibility
+
+frinkelpi:
+    - preserve empty lines
+"""
+
+import sys
+import string
+import unicodedata
+
+try:
+    if sys.version >= '2.3':
+        import textwrap
+    elif sys.version >= '2.2':
+        from optparse import textwrap
+    else:
+        from optik import textwrap
+except ImportError:
+    sys.stderr.write("Can't import textwrap module!\n")
+    raise
+
+if sys.version >= '2.7':
+    from functools import reduce
+
+if sys.version >= '3.0':
+    unicode_type = str
+    bytes_type = bytes
+else:
+    unicode_type = unicode
+    bytes_type = str
+
+
+def obj2unicode(obj):
+    """Return a unicode representation of a python object
+    """
+    if isinstance(obj, unicode_type):
+        return obj
+    elif isinstance(obj, bytes_type):
+        try:
+            return unicode_type(obj, 'utf-8')
+        except UnicodeDecodeError as strerror:
+            sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
+            return unicode_type(obj, 'utf-8', 'replace')
+    else:
+        return unicode_type(obj)
+
+
+def len(iterable):
+    """Redefining len here so it will be able to work with non-ASCII characters
+    """
+    if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
+        unicode_data = obj2unicode(iterable)
+        if hasattr(unicodedata, 'east_asian_width'):
+            w = unicodedata.east_asian_width
+            return sum([w(c) in 'WF' and 2 or 1 for c in unicode_data])
+        else:
+            return unicode_data.__len__()
+    else:
+        return iterable.__len__()
+
+
+class ArraySizeError(Exception):
+    """Exception raised when specified rows don't fit the required size
+    """
+
+    def __init__(self, msg):
+        self.msg = msg
+        Exception.__init__(self, msg, '')
+
+    def __str__(self):
+        return self.msg
+
+
+class Texttable:
+
+    BORDER = 1
+    HEADER = 1 << 1
+    HLINES = 1 << 2
+    VLINES = 1 << 3
+
+    def __init__(self, max_width=80):
+        """Constructor
+
+        - max_width is an integer, specifying the maximum width of the table
+        - if set to 0, size is unlimited, therefore cells won't be wrapped
+        """
+
+        if max_width <= 0:
+            max_width = False
+        self._max_width = max_width
+        self._precision = 3
+
+        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | Texttable.HEADER
+        self._reset()
+                         ## left, horiz, cross, right
+        self._chars_top = (chr(0x250c), chr(0x2500), chr(0x252c), chr(0x2510))
+        # self.chars_header = (chr(0x255e), chr(0x2550), chr(0x256a), chr(0x2561))
+        self._chars_header = (chr(0x251d), chr(0x2501), chr(0x253f), chr(0x2525))
+        self._chars_middle = (chr(0x251c), chr(0x2500), chr(0x253c), chr(0x2524))
+        self._chars_bottom = (chr(0x2514), chr(0x2500), chr(0x2534), chr(0x2518))
+        self._char_vert = chr(0x2502)
+        self._align = None
+
+    def _reset(self):
+        """Reset the instance
+
+        - reset rows and header
+        """
+
+        self._row_size = None
+        self._header = []
+        self._rows = []
+
+    def set_cols_align(self, array):
+        """Set the desired columns alignment
+
+        - the elements of the array should be either "l", "c" or "r":
+
+            * "l": column flushed left
+            * "c": column centered
+            * "r": column flushed right
+        """
+
+        self._check_row_size(array)
+        self._align = array
+
+    def set_cols_valign(self, array):
+        """Set the desired columns vertical alignment
+
+        - the elements of the array should be either "t", "m" or "b":
+
+            * "t": column aligned on the top of the cell
+            * "m": column aligned on the middle of the cell
+            * "b": column aligned on the bottom of the cell
+        """
+
+        self._check_row_size(array)
+        self._valign = array
+
+    def set_cols_dtype(self, array):
+        """Set the desired columns datatype for the cols.
+
+        - the elements of the array should be either "a", "t", "f", "e" or "i":
+
+            * "a": automatic (try to use the most appropriate datatype)
+            * "t": treat as text
+            * "f": treat as float in decimal format
+            * "e": treat as float in exponential format
+            * "i": treat as int
+
+        - by default, automatic datatyping is used for each column
+        """
+
+        self._check_row_size(array)
+        self._dtype = array
+
+    def set_cols_width(self, array):
+        """Set the desired columns width
+
+        - the elements of the array should be integers, specifying the
+          width of each column. For example:
+
+                [10, 20, 5]
+        """
+
+        self._check_row_size(array)
+        try:
+            array = list(map(int, array))
+            if reduce(min, array) <= 0:
+                raise ValueError
+        except ValueError:
+            sys.stderr.write("Wrong argument in column width specification\n")
+            raise
+        self._width = array
+
+    def set_precision(self, width):
+        """Set the desired precision for float/exponential formats
+
+        - width must be an integer >= 0
+
+        - default value is set to 3
+        """
+
+        if not type(width) is int or width < 0:
+            raise ValueError('width must be an integer >= 0')
+        self._precision = width
+
+    def header(self, array):
+        """Specify the header of the table
+        """
+
+        self._check_row_size(array)
+        self._header = list(map(obj2unicode, array))
+
+    def add_row(self, array):
+        """Add a row in the rows stack
+
+        - cells can contain newlines and tabs
+        """
+
+        self._check_row_size(array)
+
+        if not hasattr(self, "_dtype"):
+            self._dtype = ["a"] * self._row_size
+
+        cells = []
+        for i, x in enumerate(array):
+            cells.append(self._str(i, x))
+        self._rows.append(cells)
+
+    def add_rows(self, rows, header=True):
+        """Add several rows in the rows stack
+
+        - The 'rows' argument can be either an iterator returning arrays,
+          or a two-dimensional array
+        - 'header' specifies if the first row should be used as the header
+          of the table
+        """
+
+        # nb: don't use 'iter' on two-dimensional arrays, to keep the
+        #     code usable on python 2.1
+        if header:
+            if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
+                self.header(rows.next())
+            else:
+                self.header(rows[0])
+                rows = rows[1:]
+        for row in rows:
+            self.add_row(row)
+
+    def draw(self):
+        """Draw the table
+
+        - the table is returned as a whole string
+        """
+
+        if not self._header and not self._rows:
+            return
+        self._compute_cols_width()
+        self._check_align()
+        out = ""
+
+        if self._has_border():
+            out += self._hline(*self._chars_top)
+
+        if self._header:
+            out += self._draw_line(self._header, isheader=True)
+            if self._has_header():
+                out += self._hline(*self._chars_header)
+
+        length = 0
+        for row in self._rows:
+            length += 1
+            out += self._draw_line(row)
+            if self._has_hlines() and length < len(self._rows):
+                out += self._hline(*self._chars_middle)
+
+        if self._has_border():
+            out += self._hline(*self._chars_bottom)
+
+        return out[:-1]
+
+    def _str(self, i, x):
+        """Handles string formatting of cell data
+
+            i - index of the cell datatype in self._dtype
+            x - cell data to format
+        """
+        try:
+            f = float(x)
+        except:
+            return obj2unicode(x)
+
+        n = self._precision
+        dtype = self._dtype[i]
+
+        if dtype == 'i':
+            return str(int(round(f)))
+        elif dtype == 'f':
+            return '%.*f' % (n, f)
+        elif dtype == 'e':
+            return '%.*e' % (n, f)
+        elif dtype == 't':
+            return obj2unicode(x)
+        else:
+            if f - round(f) == 0:
+                if abs(f) > 1e8:
+                    return '%.*e' % (n, f)
+                else:
+                    return str(int(round(f)))
+            else:
+                if abs(f) > 1e8:
+                    return '%.*e' % (n, f)
+                else:
+                    return '%.*f' % (n, f)
+
+    def _check_row_size(self, array):
+        """Check that the specified array fits the previous rows size
+        """
+
+        if not self._row_size:
+            self._row_size = len(array)
+        elif self._row_size != len(array):
+            raise ArraySizeError("array should contain %d elements" \
+                % self._row_size)
+
+    def _has_vlines(self):
+        """Return a boolean, if vlines are required or not
+        """
+
+        return self._deco & Texttable.VLINES > 0
+
+    def _has_hlines(self):
+        """Return a boolean, if hlines are required or not
+        """
+
+        return self._deco & Texttable.HLINES > 0
+
+    def _has_border(self):
+        """Return a boolean, if border is required or not
+        """
+
+        return self._deco & Texttable.BORDER > 0
+
+    def _has_header(self):
+        """Return a boolean, if header line is required or not
+        """
+
+        return self._deco & Texttable.HEADER > 0
+
+    def _hline(self, left, horiz, cross, right):
+        """Return a string used to separated rows or separate header from rows"""
+
+        # compute cell separator
+        sep = horiz + (cross if self._has_vlines() else horiz) + horiz
+
+        # build the line
+        line = sep.join([horiz * n for n in self._width])
+
+        # add border if needed
+        if self._has_border():
+            line = left + horiz + line  + horiz + right
+
+        return line + "\n"
+
+    def _len_cell(self, cell):
+        """Return the width of the cell
+
+        Special characters are taken into account to return the width of the
+        cell, such like newlines and tabs
+        """
+
+        cell_lines = cell.split('\n')
+        maxi = 0
+        for line in cell_lines:
+            length = 0
+            parts = line.split('\t')
+            for part, i in zip(parts, list(range(1, len(parts) + 1))):
+                length = length + len(part)
+                if i < len(parts):
+                    length = (length//8 + 1) * 8
+            maxi = max(maxi, length)
+        return maxi
+
+    def _compute_cols_width(self):
+        """Return an array with the width of each column
+
+        If a specific width has been specified, exit. If the total of the
+        columns width exceed the table desired width, another width will be
+        computed to fit, and cells will be wrapped.
+        """
+
+        if hasattr(self, "_width"):
+            return
+        maxi = []
+        if self._header:
+            maxi = [ self._len_cell(x) for x in self._header ]
+        for row in self._rows:
+            for cell,i in zip(row, list(range(len(row)))):
+                try:
+                    maxi[i] = max(maxi[i], self._len_cell(cell))
+                except (TypeError, IndexError):
+                    maxi.append(self._len_cell(cell))
+        items = len(maxi)
+        length = sum(maxi)
+        if self._max_width and length + items * 3 + 1 > self._max_width:
+            maxi = [
+                int(round(self._max_width / (length + items * 3 + 1) * n))
+                for n in maxi
+            ]
+        self._width = maxi
+
+    def _check_align(self):
+        """Check if alignment has been specified, set default one if not
+        """
+
+        if not hasattr(self, "_align"):
+            self._align = ["l"] * self._row_size
+        if not hasattr(self, "_valign"):
+            self._valign = ["t"] * self._row_size
+
+    def _draw_line(self, line, isheader=False):
+        """Draw a line
+
+        Loop over a single cell length, over all the cells
+        """
+
+        line = self._splitit(line, isheader)
+        space = " "
+        out = ""
+        for i in range(len(line[0])):
+            if self._has_border():
+                out += "%s " % self._char_vert
+            length = 0
+            for cell, width, align in zip(line, self._width, self._align):
+                length += 1
+                cell_line = cell[i]
+                fill = width - len(cell_line)
+                if isheader:
+                    align = "c"
+                if align == "r":
+                    out += "%s " % (fill * space + cell_line)
+                elif align == "c":
+                    out += "%s " % (int(fill/2) * space + cell_line + int(fill/2 + fill%2) * space)
+                else:
+                    out += "%s " % (cell_line + fill * space)
+                if length < len(line):
+                    out += "%s " % [space, self._char_vert][self._has_vlines()]
+            out += "%s\n" % ['', self._char_vert][self._has_border()]
+        return out
+
+    def _splitit(self, line, isheader):
+        """Split each element of line to fit the column width
+
+        Each element is turned into a list, result of the wrapping of the
+        string to the desired width
+        """
+
+        line_wrapped = []
+        for cell, width in zip(line, self._width):
+            array = []
+            for c in cell.split('\n'):
+                if c.strip() == "":
+                    array.append("")
+                else:
+                    array.extend(textwrap.wrap(c, width))
+            line_wrapped.append(array)
+        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
+        for cell, valign in zip(line_wrapped, self._valign):
+            if isheader:
+                valign = "t"
+            if valign == "m":
+                missing = max_cell_lines - len(cell)
+                cell[:0] = [""] * int(missing / 2)
+                cell.extend([""] * int(missing / 2 + missing % 2))
+            elif valign == "b":
+                cell[:0] = [""] * (max_cell_lines - len(cell))
+            else:
+                cell.extend([""] * (max_cell_lines - len(cell)))
+        return line_wrapped
+
+
+if __name__ == '__main__':
+    table = Texttable()
+    table.set_cols_align(["l", "r", "c"])
+    table.set_cols_valign(["t", "m", "b"])
+    table.add_rows([["Name", "Age", "Nickname"],
+                    ["Mr\nXavier\nHuon", 32, "Xav'"],
+                    ["Mr\nBaptiste\nClement", 1, "Baby"],
+                    ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
+    print(table.draw() + "\n")
+
+    table = Texttable()
+    table.set_deco(Texttable.HEADER)
+    table.set_cols_dtype(['t',  # text
+                          'f',  # float (decimal)
+                          'e',  # float (exponent)
+                          'i',  # integer
+                          'a']) # automatic
+    table.set_cols_align(["l", "r", "r", "r", "l"])
+    table.add_rows([["text",    "float", "exp", "int", "auto"],
+                    ["abcd",    "67",    654,   89,    128.001],
+                    ["efghijk", 67.5434, .654,  89.6,  12800000000000000000000.00023],
+                    ["lmn",     5e-78,   5e-78, 89.4,  .000000000000128],
+                    ["opqrstu", .023,    5e+78, 92.,   12800000000000000000000]])
+    print(table.draw())