fix pre-demo bugs: wire OpenStack creds through config, journal launched VM ids for cleanup

- read OpenStack credentials from the clouds config section (or the
  environment) instead of hard-wiring them
- record launched VM ids in ctx and vm_journal.log so cleanup deletes
  exactly the VMs this run started
- log stage exceptions instead of silently swallowing them
diff --git a/config.yaml b/config.yaml
index 5e226e0..8bb1fa2 100644
--- a/config.yaml
+++ b/config.yaml
@@ -52,21 +52,22 @@
 
 # # tests to run
 tests: # $include(tests.yaml)
-      start_test_nodes:
-         openstack:
-             vm_params:
-                 count: x1
-                 img_name: disk_io_perf
-                 flavor_name: disk_io_perf.256
-                 keypair_name: disk_io_perf
-                 network_zone_name: novanetwork
-                 flt_ip_pool: nova
-                 creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
-         tests:
-             - pgbench:
-                  opts:
-                      num_clients: [4, 8, 12]
-                      transactions: [1, 2, 3]
+    start_test_nodes:
+        openstack:
+
+            vm_params:
+                count: x1
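+                # "xN" means N VMs per enabled nova-compute service
+                # (expanded in start_vms.launch_vms)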
+                img_name: disk_io_perf
+                flavor_name: disk_io_perf.256
+                keypair_name: disk_io_perf
+                network_zone_name: novanetwork
+                flt_ip_pool: nova
+                creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
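+                # {0} is replaced with the VM floating IP by launch_vms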
+        internal_tests:
+            - pgbench:
+                opts:
+                    num_clients: [4, 8, 12]
+                    transactions: [1, 2, 3]
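+                    # presumably forwarded as pgbench -c (clients)
+                    # and -t (transactions per client)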
 
 #     - io: tests/io_task_test.cfg
 
diff --git a/koder.yaml b/koder.yaml
index 5a719cd..28a8829 100644
--- a/koder.yaml
+++ b/koder.yaml
@@ -1,10 +1,16 @@
 clouds:
-     fuel:
-         id: 3
-         url: http://172.16.52.112:8000/
-         creds: admin:admin@admin
-         ssh_creds: root:test37
-         openstack_env: test
+    fuel:
+        id: 3
+        url: http://172.16.52.112:8000/
+        creds: admin:admin@admin
+        ssh_creds: root:test37
+        openstack_env: test
+
+    openstack:
+        OS_TENANT_NAME: admin
+        OS_USERNAME: admin
+        OS_PASSWORD: admin
+        OS_AUTH_URL: http://172.16.53.3:5000/v2.0/
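+        # standard OpenStack auth settings; read when a test sets creds: clouds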
 
 discover: fuel
 
@@ -20,6 +26,7 @@
 tests:
     start_test_nodes:
         openstack:
+            creds: clouds
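+            # creds source: 'clouds' (section above), 'fuel' (not
+            # implemented yet) or 'ENV' (shell variables)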
             vm_params:
                 count: x1
                 img_name: disk_io_perf
diff --git a/run_test.py b/run_test.py
index 1d58ede..aa47b21 100755
--- a/run_test.py
+++ b/run_test.py
@@ -6,6 +6,7 @@
 import pprint
 import logging
 import argparse
+import traceback
 import threading
 import collections
 
@@ -50,6 +51,14 @@
     return templ.format(data, formatter(res), "=" * 80)
 
 
+class Context(object):
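+    """Mutable run state handed to every stage as stage(cfg_dict, ctx)."""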
+    def __init__(self):
+        self.build_meta = {}
+        self.nodes = []
+        self.clear_calls_stack = []
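+        # ids of VMs launched by this run; None until start_test_nodes
+        # has run, which lets cleanup fall back to vm_journal.log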
+        self.openstack_nodes_ids = None
+
+
 def connect_one(node):
     try:
         ssh_pref = "ssh://"
@@ -216,52 +225,89 @@
     ctx.sensor_data = ctx.sensors_control_queue.get()
 
 
-def run_all_test(cfg, ctx, store_nodes):
+def run_all_test(cfg, ctx):
     ctx.results = []
 
     if 'start_test_nodes' in cfg['tests']:
         params = cfg['tests']['start_test_nodes']['openstack']
+        os_nodes_ids = []
 
-    for new_node in start_vms.launch_vms(params):
-        new_node.roles.append('testnode')
-        ctx.nodes.append(new_node)
+        os_creds = params['creds']
+
+        if os_creds == 'fuel':
+            raise NotImplementedError("'fuel' creds source is not supported yet")
+
+        elif os_creds == 'clouds':
+            os_cfg = cfg['clouds']['openstack']
+            tenant = os_cfg['OS_TENANT_NAME'].strip()
+            user = os_cfg['OS_USERNAME'].strip()
+            passwd = os_cfg['OS_PASSWORD'].strip()
+            auth_url = os_cfg['OS_AUTH_URL'].strip()
+
+        elif os_creds == 'ENV':
+            tenant = None
+            user = None
+            passwd = None
+            auth_url = None
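+            # with all-None creds, nova_connect falls back to
+            # ostack_get_creds(), which presumably reads the OS_* shell vars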
+
+        else:
+            raise ValueError("Unknown creds source {0!r}; expected "
+                             "'fuel', 'clouds' or 'ENV'".format(os_creds))
+
+        start_vms.nova_connect(user, passwd, tenant, auth_url)
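+        # caches the client in start_vms.NOVA_CONNECTION so that
+        # launch_vms and clear_nodes reuse the same connection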
+
+        new_nodes = []
+        for new_node, node_id in start_vms.launch_vms(params):
+            new_node.roles.append('testnode')
+            ctx.nodes.append(new_node)
+            os_nodes_ids.append(node_id)
+            new_nodes.append(new_node)
+
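+        # journal the ids so a later run can clean up VMs leaked here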
+        store_nodes_in_log(os_nodes_ids)
+        ctx.openstack_nodes_ids = os_nodes_ids
+
+        connect_all(new_nodes)
 
     if 'tests' in cfg:
-        store_nodes(ctx.nodes)
         ctx.results.extend(run_tests(cfg_dict, ctx.nodes))
 
 
-def shut_down_vms(cfg, ctx):
-    with open('vm_journal.log') as f:
-        data = str(f.read())
-        nodes = pickle.loads(data)
+def shut_down_vms_stage(cfg, ctx):
+    if ctx.openstack_nodes_ids is None:
+        if not os.path.exists('vm_journal.log'):
+            logger.info("No vm_journal.log found, nothing to shut down")
+            return
+
+        with open('vm_journal.log') as f:
+            data = f.read().strip()
 
-        for node in nodes:
-            logger.info("Node " + str(node) + " has been loaded")
+        if data == "":
+            logger.info("Journal file is empty")
+            return
 
-        logger.info("Removing nodes")
-        start_vms.clear_nodes()
-        logger.info("Nodes has been removed")
+        try:
+            nodes_ids = pickle.loads(data)
+        except Exception:
+            logger.error("vm_journal.log is corrupted, skipping VM shutdown")
+            return
+    else:
+        nodes_ids = ctx.openstack_nodes_ids
+
+    logger.info("Removing nodes")
+    start_vms.clear_nodes(nodes_ids)
+    logger.info("Nodes has been removed")
 
 
-def store_nodes(nodes):
+def store_nodes_in_log(nodes_ids):
     with open('vm_journal.log', 'w+') as f:
-        f.write(pickle.dumps([nodes]))
-        for node in nodes:
-            logger.info("Node " + str(node) + " has been stored")
+        f.write(pickle.dumps(nodes_ids))
 
 
 def clear_enviroment(cfg, ctx):
     if os.path.exists('vm_journal.log'):
-        shut_down_vms(cfg, ctx)
+        shut_down_vms_stage(cfg, ctx)
         os.remove('vm_journal.log')
 
 
 def run_tests_stage(cfg, ctx):
     # clear nodes that possible were created on previous test running
-    clear_enviroment(cfg, ctx)
-    ctx.clear_calls_stack.append(shut_down_vms)
-    run_all_test(cfg, ctx, store_nodes)
+    # TODO: re-enable clear_enviroment(cfg, ctx) once the OS
+    # connection is established before this stage
+    ctx.clear_calls_stack.append(shut_down_vms_stage)
+    run_all_test(cfg, ctx)
 
 
 def disconnect_stage(cfg, ctx):
@@ -272,6 +318,7 @@
 
 def report_stage(cfg, ctx):
     output_dest = cfg.get('output_dest')
+
     if output_dest is not None:
         if output_dest.endswith(".html"):
             report.render_html_results(ctx, output_dest)
@@ -293,13 +340,6 @@
         logger.debug(str(node))
 
 
-class Context(object):
-    def __init__(self):
-        self.build_meta = {}
-        self.nodes = []
-        self.clear_calls_stack = []
-
-
 def load_config(path):
     global cfg_dict
     cfg_dict = parse_config(path)
@@ -319,7 +359,7 @@
         # complete_log_nodes_statistic,
         deploy_sensors_stage,
         run_tests_stage,
-        # report_stage
+        report_stage
     ]
 
     load_config(opts.config_file)
@@ -341,8 +381,8 @@
             try:
                 logger.info("Start {0.__name__} stage".format(stage))
                 stage(cfg_dict, ctx)
-            except:
-                pass
+            except Exception:
+                exc, cls, tb = sys.exc_info()
+                logger.exception("Exception during {0.__name__} stage".format(stage))
 
         if exc is not None:
             raise exc, cls, tb
diff --git a/start_vms.py b/start_vms.py
index 7317fd0..821c299 100644
--- a/start_vms.py
+++ b/start_vms.py
@@ -9,8 +9,7 @@
 from cinderclient.v1.client import Client as c_client
 
 from nodes.node import Node
-from nodes.openstack import get_floating_ip
-from utils import parse_creds
+
 
 logger = logging.getLogger("io-perf-tool")
 
@@ -25,9 +24,23 @@
     return name, passwd, tenant, auth_url
 
 
-def nova_connect():
-    return n_client('1.1', *ostack_get_creds()
-                    )
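+# lazily-initialized module-level nova client, shared by launch_vms
+# and clear_nodes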
+NOVA_CONNECTION = None
+
+
+def nova_connect(name=None, passwd=None, tenant=None, auth_url=None):
+    global NOVA_CONNECTION
+    if NOVA_CONNECTION is None:
+        if name is None:
+            name, passwd, tenant, auth_url = ostack_get_creds()
+        NOVA_CONNECTION = n_client('1.1', name, passwd, tenant, auth_url)
+    return NOVA_CONNECTION
+
+
+def nova_disconnect():
+    global NOVA_CONNECTION
+    if NOVA_CONNECTION is not None:
+        NOVA_CONNECTION.close()
+        NOVA_CONNECTION = None
 
 
 def create_keypair(nova, name, key_path):
@@ -87,26 +100,21 @@
 
 
 def launch_vms(config):
-    creds = config['vm_params']['creds']
-
-    # if creds != 'ENV':
-    #     raise ValueError("Only 'ENV' creds are supported")
-
     logger.debug("Starting new nodes on openstack")
-    conn = nova_connect()
     params = config['vm_params'].copy()
     count = params.pop('count')
 
     if isinstance(count, basestring):
         assert count.startswith("x")
-        lst = conn.services.list(binary='nova-compute')
+        lst = nova_connect().services.list(binary='nova-compute')
         srv_count = len([srv for srv in lst if srv.status == 'enabled'])
         count = srv_count * int(count[1:])
 
-    creds = params.pop('creds')
+    # creds template comes from config['vm_params'], copied into params above
+    vm_creds = params.pop('creds')
 
-    for ip, _ in create_vms_mt(conn, count, **params):
-        yield Node(creds.format(ip), [])
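+    # yield the openstack server id alongside the Node so callers
+    # can journal it for later cleanup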
+    for ip, os_node in create_vms_mt(nova_connect(), count, **params):
+        yield Node(vm_creds.format(ip), []), os_node.id
 
 
 def create_vms_mt(nova, amount, keypair_name, img_name,
@@ -201,15 +209,21 @@
     return flt_ip.ip, nova.servers.get(srv.id)
 
 
-def clear_nodes():
-    nova = nova_connect()
-    clear_all(nova)
+def clear_nodes(nodes_ids):
+    clear_all(nova_connect(), nodes_ids, None)
 
 
-def clear_all(nova, name_templ="ceph-test-{0}"):
+def clear_all(nova, ids=None, name_templ="ceph-test-{0}"):
+
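+    # name_templ=None switches matching from the name pattern to the
+    # explicit ids list (how clear_nodes invokes this)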
+    def need_delete(srv):
+        if name_templ is not None:
+            return re.match(name_templ.format("\\d+"), srv.name) is not None
+        else:
+            return srv.id in ids
+
     deleted_srvs = set()
     for srv in nova.servers.list():
-        if re.match(name_templ.format("\\d+"), srv.name):
+        if need_delete(srv):
             logger.debug("Deleting server {0}".format(srv.name))
             nova.servers.delete(srv)
             deleted_srvs.add(srv.id)
@@ -224,13 +238,14 @@
 
     # wait till vm actually deleted
 
-    cinder = c_client(*ostack_get_creds())
-    for vol in cinder.volumes.list():
-        if isinstance(vol.display_name, basestring):
-            if re.match(name_templ.format("\\d+"), vol.display_name):
-                if vol.status in ('available', 'error'):
-                    logger.debug("Deleting volume " + vol.display_name)
-                    cinder.volumes.delete(vol)
+    if name_templ is not None:
+        cinder = c_client(*ostack_get_creds())
+        for vol in cinder.volumes.list():
+            if isinstance(vol.display_name, basestring):
+                if re.match(name_templ.format("\\d+"), vol.display_name):
+                    if vol.status in ('available', 'error'):
+                        logger.debug("Deleting volume " + vol.display_name)
+                        cinder.volumes.delete(vol)
 
     logger.debug("Clearing done (yet some volumes may still deleting)")
 
diff --git a/tests/io_task_test.cfg b/tests/io_task_test.cfg
index c9fc2ab..4d78578 100644
--- a/tests/io_task_test.cfg
+++ b/tests/io_task_test.cfg
@@ -1,4 +1,5 @@
 [writetest * 3]
+group_reporting
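+# group_reporting makes fio report aggregated stats for the job group
+# instead of one report per numjobs clone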
 numjobs=4
 wait_for_previous
 ramp_time=5
diff --git a/tests/itest.py b/tests/itest.py
index b073879..53c4af3 100644
--- a/tests/itest.py
+++ b/tests/itest.py
@@ -5,6 +5,7 @@
 import logging
 
 from disk_perf_test_tool.tests import disk_test_agent
+from disk_perf_test_tool.tests.io_results_loader import parse_output
 from disk_perf_test_tool.ssh_utils import copy_paths
 from disk_perf_test_tool.utils import run_over_ssh, ssize_to_b
 
@@ -141,7 +142,7 @@
     def on_result(self, code, out_err, cmd):
         if 0 == code:
             try:
-                for data in disk_test_agent.parse_output(out_err):
+                for data in parse_output(out_err):
                     self.on_result_cb(data)
             except Exception as exc:
                 msg_templ = "Error during postprocessing results: {0!r}"