improve OS preparation, change config structure
diff --git a/configs/ceph_perf.yaml b/configs/ceph_perf.yaml
new file mode 100644
index 0000000..c8466c1
--- /dev/null
+++ b/configs/ceph_perf.yaml
@@ -0,0 +1,49 @@
+clouds:
+    fuel:
+        url: http://172.16.52.114:8000/
+        creds: admin:admin@admin
+        ssh_creds: root:test37
+        openstack_env: test
+
+# discover: fuel_openrc_only
+
+internal:
+    var_dir_root: /tmp/perf_tests
+
+logging:
+    extra_logs: 1
+
+vm_configs:
+    wally_1024:
+        image:
+            name: wally_ubuntu
+            url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+            creds: "ssh://ubuntu@{ip}::{private_key_path}"
+
+        flavor:
+            name: wally_1024
+            hdd_size: 50
+            ram_size: 1024
+            cpu_count: 1
+
+        vol_sz: 30
+        keypair_name: wally_vm_key
+        name_templ: wally-{group}-{id}
+        aa_group_name: wally-aa-{0}
+        security_group: wally_ssh_to_everyone
+
+tests:
+    - start_test_nodes:
+        openstack:
+            creds: /tmp/perf_tests/personalistic_sharron/test_openrc
+            count: x4
+            cfg_name: wally_1024
+            network_zone_name: net04
+            flt_ip_pool: net04_ext
+
+        tests:
+            - io:
+                cfg: ceph
+                params:
+                    FILENAME: /dev/vdb
+                    NUM_ROUNDS: 1
diff --git a/configs/config.yaml b/configs/config.yaml
index a5a4b16..522fcd3 100644
--- a/configs/config.yaml
+++ b/configs/config.yaml
@@ -22,19 +22,34 @@
 internal:
     var_dir_root: /tmp/perf_tests
 
+vm_configs:
+    wally_1024:
+        image:
+            name: wally_ubuntu
+            url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+            creds: "ssh://ubuntu@{ip}::{private_key_path}"
+
+        flavor:
+            name: wally_1024
+            hdd_size: 50
+            ram_size: 1024
+            cpu_count: 1
+
+        vol_sz: 30
+        keypair_name: wally_vm_key
+        name_templ: wally-{group}-{id}
+        aa_group_name: wally-aa-{0}
+        security_group: wally_ssh_to_everyone
+
 tests:
     - start_test_nodes:
         openstack:
-            creds: clouds
-            vm_params:
-                count: x2
-                img_name: disk_io_perf
-                flavor_name: disk_io_perf.256
-                keypair_name: disk_io_perf
-                network_zone_name: novanetwork
-                flt_ip_pool: nova
-                creds: "ssh://ubuntu@{0}::disk_io_perf.pem"
-                name_templ: disk_io_perf-{0}
+            creds: /tmp/perf_tests/personalistic_sharron/test_openrc
+            count: x1
+            cfg_name: wally_1024
+            network_zone_name: net04
+            flt_ip_pool: net04_ext
+
         tests:
             - io:
                 cfg: tests/io_scenario_hdd.cfg
diff --git a/scripts/config.sh b/scripts/config.sh
index 2937edc..8880b64 100644
--- a/scripts/config.sh
+++ b/scripts/config.sh
@@ -1,5 +1,7 @@
 FLAVOR_NAME="disk_io_perf.1024"
-SERV_GROUP="disk_io_perf.aa"
+
+SERV_GROUPS="disk_io_perf.aa.0 disk_io_perf.aa.1 disk_io_perf.aa.2 disk_io_perf.aa.3 disk_io_perf.aa.4 disk_io_perf.aa.5 disk_io_perf.aa.6 disk_io_perf.aa.7"
+
 KEYPAIR_NAME="disk_io_perf"
 IMAGE_NAME="disk_io_perf"
 KEY_FILE_NAME="${KEYPAIR_NAME}.pem"
diff --git a/scripts/prepare.sh b/scripts/prepare.sh
index c114874..475f47f 100644
--- a/scripts/prepare.sh
+++ b/scripts/prepare.sh
@@ -1,15 +1,6 @@
 #!/bin/bash
 set -e
 
-my_dir="$(dirname "$0")"
-source "$my_dir/config.sh"
-
-# settings
-FL_RAM=1024
-FL_HDD=20
-FL_CPU=1
-
-
 function lookup_for_objects() {
     set +e
 
@@ -29,13 +20,19 @@
         echo " Not Found"
     fi
 
-    echo -n "Looking for server-group $SERV_GROUP ... "
-    export group_id=$(nova server-group-list | grep " $SERV_GROUP " | awk '{print $2}' )
-    if [ ! -z "$group_id" ] ; then
-        echo " Found"
-    else
-        echo " Not Found"
-    fi
+    groups_ids=""
+    export missed_groups=""
+    for SERV_GROUP in $SERV_GROUPS ; do
+        echo -n "Looking for server-group $SERV_GROUP ... "
+        group_id=$(nova server-group-list | grep " $SERV_GROUP " | awk '{print $2}' )
+        if [ ! -z "$group_id" ] ; then
+            echo " Found"
+            export groups_ids="$groups_ids $group_id"
+        else
+            echo " Not Found"
+            export missed_groups="$missed_groups $SERV_GROUP"
+        fi
+    done
 
     echo -n "Looking for keypair $KEYPAIR_NAME ... "
     export keypair_id=$(nova keypair-list | grep " $KEYPAIR_NAME " | awk '{print $2}' )
@@ -69,10 +66,10 @@
         nova flavor-delete "$flavor_id" >/dev/null
     fi
 
-    if [ ! -z "$group_id" ] ; then
+    for group_id in $groups_ids ; do
-        echo "Deleting server-group $SERV_GROUP"
+        echo "Deleting server-group $group_id"
         nova server-group-delete "$group_id" >/dev/null
-    fi
+    done
 
     if [ ! -z "$keypair_id" ] ; then
         echo "deleting keypair $KEYPAIR_NAME"
@@ -101,13 +98,15 @@
 
     if [ -z "$flavor_id" ] ; then
         echo "Creating flavor $FLAVOR_NAME"
-        nova flavor-create "$FLAVOR_NAME" "$FLAVOR_NAME" "$FL_RAM" "$FL_HDD" "$FL_CPU" >/dev/null
+        nova flavor-create "$FLAVOR_NAME" "$FLAVOR_NAME" "$FLAVOR_RAM" "$FLAVOR_HDD" "$FLAVOR_CPU_COUNT" >/dev/null
     fi
 
-    if [ -z "$group_id" ] ; then
+    for SERV_GROUP in $missed_groups ; do
         echo "Creating server group $SERV_GROUP"
         nova server-group-create --policy anti-affinity "$SERV_GROUP" >/dev/null
-    fi
+        group_id=$(nova server-group-list | grep " $SERV_GROUP " | awk '{print $2}' )
+        export groups_ids="$groups_ids $group_id"
+    done
 
     if [ -z "$keypair_id" ] ; then
-        echo "Creating server group $SERV_GROUP. Key would be stored into $KEY_FILE_NAME"
+        echo "Creating keypair $KEYPAIR_NAME. Key will be stored into $KEY_FILE_NAME"
diff --git a/wally/discover/discover.py b/wally/discover/discover.py
index 73407c0..3cab884 100644
--- a/wally/discover/discover.py
+++ b/wally/discover/discover.py
@@ -57,7 +57,10 @@
                                                           cluster_info)
             nodes_to_run.extend(os_nodes)
 
-        elif cluster == "fuel":
+        elif cluster == "fuel" or cluster == "fuel_openrc_only":
+            if cluster == "fuel_openrc_only":
+                discover_nodes = False
+
             res = fuel.discover_fuel_nodes(clusters_info['fuel'],
                                            var_dir,
                                            discover_nodes)
diff --git a/wally/fuel_rest_api.py b/wally/fuel_rest_api.py
index 737bf2e..0567e71 100644
--- a/wally/fuel_rest_api.py
+++ b/wally/fuel_rest_api.py
@@ -287,6 +287,7 @@
             if net['name'] == network:
                 iface_name = net['dev']
                 for iface in self.get_info()['meta']['interfaces']:
+                    # print iface, net['ip']  # XXX(review): stray debug print, disabled
                     if iface['name'] == iface_name:
                         try:
                             return iface['ip']
diff --git a/wally/report.py b/wally/report.py
index 209fa31..2ab4664 100644
--- a/wally/report.py
+++ b/wally/report.py
@@ -88,10 +88,9 @@
 #     marker += 1
 #     plt.legend(loc=2)
 #     plt.title("Linearity test by %i dots" % (len(vals)))
-
-
 # if plt:
-#     linearity_report = report('linearity', 'linearity_test')(linearity_report)
+#     linearity_report = report('linearity',
+#    'linearity_test')(linearity_report)
 
 
 def render_hdd_html(dest, info, lab_description):
@@ -100,11 +99,12 @@
     templ_file = os.path.join(templ_dir, "report_hdd.html")
     templ = open(templ_file, 'r').read()
 
-    for name in info.__dict__:
+    for name, val in info.__dict__.items():
         if not name.startswith('__'):
-            if info.__dict__[name] == "-":
-                continue
-            info.__dict__[name] = round_3_digit(info.__dict__[name])
+            if val is None:
+                info.__dict__[name] = '-'
+            else:
+                info.__dict__[name] = round_3_digit(val)
 
     report = templ.format(lab_info=lab_description, **info.__dict__)
     open(dest, 'w').write(report)
@@ -265,9 +265,9 @@
     hdi = DiskInfo()
     hdi.direct_iops_r_max = di.direct_iops_r_max
     hdi.direct_iops_w_max = di.direct_iops_w_max
-    hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else '-'
-    hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else '-'
-    hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else '-'
+    hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else None
+    hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else None
+    hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else None
     hdi.bw_write_max = di.bw_write_max
     hdi.bw_read_max = di.bw_read_max
     return hdi
diff --git a/wally/run_test.py b/wally/run_test.py
index fbe676f..6606d04 100755
--- a/wally/run_test.py
+++ b/wally/run_test.py
@@ -326,7 +326,7 @@
 
 @contextlib.contextmanager
 def create_vms_ctx(ctx, cfg, config):
-    params = config['vm_params'].copy()
+    params = cfg['vm_configs'][config['cfg_name']].copy()
     os_nodes_ids = []
 
     os_creds_type = config['creds']
@@ -335,11 +335,14 @@
     start_vms.nova_connect(**os_creds)
 
     logger.info("Preparing openstack")
-    start_vms.prepare_os_subpr(**os_creds)
+    params.update(config)
+    params['keypair_file_private'] = params['keypair_name'] + ".pem"
+    params['group_name'] = cfg_dict['run_uuid']
+
+    start_vms.prepare_os_subpr(params=params, **os_creds)
 
     new_nodes = []
     try:
-        params['group_name'] = cfg_dict['run_uuid']
         for new_node, node_id in start_vms.launch_vms(params):
             new_node.roles.append('testnode')
             ctx.nodes.append(new_node)
@@ -368,7 +371,12 @@
         key, config = group.items()[0]
 
         if 'start_test_nodes' == key:
-            with create_vms_ctx(ctx, cfg, config) as new_nodes:
+            if 'openstack' not in config:
+                msg = "No openstack block in config - can't spawn vm's"
+                logger.error(msg)
+                raise utils.StopTestError(msg)
+
+            with create_vms_ctx(ctx, cfg, config['openstack']) as new_nodes:
                 connect_all(new_nodes, True)
 
                 for node in new_nodes:
@@ -584,13 +592,17 @@
     if cfg_dict.get('run_web_ui', False):
         start_web_ui(cfg_dict, ctx)
 
+    msg_templ = "Exception during {0.__name__}: {1!s}"
+    msg_templ_no_exc = "During {0.__name__}"
+
     try:
         for stage in stages:
             logger.info("Start {0.__name__} stage".format(stage))
             stage(cfg_dict, ctx)
-    except Exception as exc:
-        msg = "Exception during {0.__name__}: {1!s}".format(stage, exc)
-        logger.error(msg)
+    except utils.StopTestError as exc:
+        logger.error(msg_templ.format(stage, exc))
+    except Exception:
+        logger.exception(msg_templ_no_exc.format(stage))
     finally:
         exc, cls, tb = sys.exc_info()
         for stage in ctx.clear_calls_stack[::-1]:
@@ -598,13 +610,9 @@
                 logger.info("Start {0.__name__} stage".format(stage))
                 stage(cfg_dict, ctx)
             except utils.StopTestError as exc:
-                msg = "During {0.__name__} stage: {1}".format(stage, exc)
-                logger.error(msg)
-            except Exception as exc:
-                logger.exception("During {0.__name__} stage".format(stage))
-
-        # if exc is not None:
-        #     raise exc, cls, tb
+                logger.error(msg_templ.format(stage, exc))
+            except Exception:
+                logger.exception(msg_templ_no_exc.format(stage))
 
     if exc is None:
         for report_stage in report_stages:
diff --git a/wally/start_vms.py b/wally/start_vms.py
index e3b9245..af7df71 100644
--- a/wally/start_vms.py
+++ b/wally/start_vms.py
@@ -62,46 +62,59 @@
     return CINDER_CONNECTION
 
 
-def nova_disconnect():
-    global NOVA_CONNECTION
-    if NOVA_CONNECTION is not None:
-        NOVA_CONNECTION.close()
-        NOVA_CONNECTION = None
-
-
-def prepare_os_subpr(name=None, passwd=None, tenant=None, auth_url=None):
+def prepare_os_subpr(params, name=None, passwd=None, tenant=None,
+                     auth_url=None):
     if name is None:
         name, passwd, tenant, auth_url = ostack_get_creds()
 
-    params = {
-        'OS_USERNAME': name,
-        'OS_PASSWORD':  passwd,
-        'OS_TENANT_NAME':  tenant,
-        'OS_AUTH_URL':  auth_url
-    }
+    MAX_VM_PER_NODE = 8
+    serv_groups = " ".join(map(params['aa_group_name'].format,
+                               range(MAX_VM_PER_NODE)))
 
-    params_s = " ".join("{0}={1}".format(k, v) for k, v in params.items())
+    env = os.environ.copy()
+    env.update(dict(
+        OS_USERNAME=name,
+        OS_PASSWORD=passwd,
+        OS_TENANT_NAME=tenant,
+        OS_AUTH_URL=auth_url,
 
-    spath = os.path.dirname(wally.__file__)
-    spath = os.path.dirname(spath)
+        FLAVOR_NAME=params['flavor']['name'],
+        FLAVOR_RAM=str(params['flavor']['ram_size']),
+        FLAVOR_HDD=str(params['flavor']['hdd_size']),
+        FLAVOR_CPU_COUNT=str(params['flavor']['cpu_count']),
+
+        SERV_GROUPS=serv_groups,
+        KEYPAIR_NAME=params['keypair_name'],
+
+        SECGROUP=params['security_group'],
+
+        IMAGE_NAME=params['image']['name'],
+        KEY_FILE_NAME=params['keypair_file_private'],
+        IMAGE_URL=params['image']['url'],
+    ))
+
+    spath = os.path.dirname(os.path.dirname(wally.__file__))
     spath = os.path.join(spath, 'scripts/prepare.sh')
 
-    cmd_templ = "env {params} bash {spath} >/dev/null"
-    cmd = cmd_templ.format(params=params_s, spath=spath)
-    subprocess.call(cmd, shell=True)
+    cmd = "bash {spath} >/dev/null".format(spath=spath)
+    subprocess.check_call(cmd, shell=True, env=env)
 
 
 def prepare_os(nova, params):
     allow_ssh(nova, params['security_group'])
 
+    MAX_VM_PER_NODE = 8
+    serv_groups = map(params['aa_group_name'].format,
+                      range(MAX_VM_PER_NODE))
+
     shed_ids = []
-    for shed_group in params['schedulers_groups']:
+    for shed_group in serv_groups:
         shed_ids.append(get_or_create_aa_group(nova, shed_group))
 
     create_keypair(nova,
                    params['keypair_name'],
-                   params['pub_key_path'],
-                   params['priv_key_path'])
+                   params['keypair_name'] + ".pub",
+                   params['keypair_name'] + ".pem")
 
     create_image(nova, params['image']['name'],
                  params['image']['url'])
@@ -144,13 +157,14 @@
     pass
 
 
-def create_flavor(nova, name, **params):
+def create_flavor(nova, name, ram_size, hdd_size, cpu_count):
     pass
 
 
 def create_keypair(nova, name, pub_key_path, priv_key_path):
     try:
         nova.keypairs.find(name=name)
+        # TODO: if the local key file is missing, delete the keypair and recreate it
     except NotFound:
         if os.path.exists(pub_key_path):
             with open(pub_key_path) as pub_key_fd:
@@ -167,9 +181,6 @@
 
 def create_volume(size, name):
     cinder = cinder_connect()
-    # vol_id = "2974f227-8755-4333-bcae-cd9693cd5d04"
-    # logger.warning("Reusing volume {0}".format(vol_id))
-    # vol = cinder.volumes.get(vol_id)
     vol = cinder.volumes.create(size=size, display_name=name)
     err_count = 0
 
@@ -222,8 +233,7 @@
 
 def launch_vms(params):
     logger.debug("Starting new nodes on openstack")
-    params = params.copy()
-    count = params.pop('count')
+    count = srv_count = params['count']  # srv_count: group-size fallback when count is an int
 
     if isinstance(count, basestring):
         assert count.startswith("x")
@@ -231,30 +241,42 @@
         srv_count = len([srv for srv in lst if srv.status == 'enabled'])
         count = srv_count * int(count[1:])
 
+    assert isinstance(count, (int, long))
+
     srv_params = "img: {image[name]}, flavor: {flavor[name]}".format(**params)
     msg_templ = "Will start {0} servers with next params: {1}"
     logger.info(msg_templ.format(count, srv_params))
-    vm_creds = params.pop('creds')
 
-    params = params.copy()
+    vm_params = dict(
+        img_name=params['image']['name'],
+        flavor_name=params['flavor']['name'],
+        group_name=params['group_name'],
+        keypair_name=params['keypair_name'],
+        vol_sz=params.get('vol_sz'),
+        network_zone_name=params.get("network_zone_name"),
+        flt_ip_pool=params.get('flt_ip_pool'),
+        name_templ=params.get('name_templ'),
+        scheduler_hints={"group": params['aa_group_name']},
+        security_group=params['security_group'],
+        sec_group_size=srv_count
+    )
 
-    params['img_name'] = params['image']['name']
-    params['flavor_name'] = params['flavor']['name']
+    # precache all errors before start creating vms
+    private_key_path = params['keypair_file_private']
+    creds = params['image']['creds']
+    creds.format(ip="1.1.1.1", private_key_path="/some_path/xx")
 
-    del params['image']
-    del params['flavor']
-    del params['scheduler_group_name']
-    private_key_path = params.pop('private_key_path')
+    for ip, os_node in create_vms_mt(NOVA_CONNECTION, count, **vm_params):
 
-    for ip, os_node in create_vms_mt(NOVA_CONNECTION, count, **params):
-        conn_uri = vm_creds.format(ip=ip, private_key_path=private_key_path)
+        conn_uri = creds.format(ip=ip, private_key_path=private_key_path)
         yield Node(conn_uri, []), os_node.id
 
 
 def create_vms_mt(nova, amount, group_name, keypair_name, img_name,
                   flavor_name, vol_sz=None, network_zone_name=None,
                   flt_ip_pool=None, name_templ='wally-{id}',
-                  scheduler_hints=None, security_group=None):
+                  scheduler_hints=None, security_group=None,
+                  sec_group_size=None):
 
     with ThreadPoolExecutor(max_workers=16) as executor:
         if network_zone_name is not None:
@@ -293,7 +315,20 @@
         futures = []
         logger.debug("Requesting new vm's")
 
-        for name, flt_ip in zip(names, ips):
+        orig_scheduler_hints = scheduler_hints.copy()
+
+        for idx, (name, flt_ip) in enumerate(zip(names, ips)):
+
+            scheduler_hints = None
+            if orig_scheduler_hints is not None and sec_group_size is not None:
+                if "group" in orig_scheduler_hints:
+                    scheduler_hints = orig_scheduler_hints.copy()
+                    scheduler_hints['group'] = \
+                        scheduler_hints['group'].format(idx // sec_group_size)
+
+            if scheduler_hints is None:
+                scheduler_hints = orig_scheduler_hints.copy()
+
             params = (nova, name, keypair_name, img, fl,
                       nics, vol_sz, flt_ip, scheduler_hints,
                       flt_ip_pool, [security_group])
diff --git a/wally/suits/itest.py b/wally/suits/itest.py
index 605df2c..bd98dee 100644
--- a/wally/suits/itest.py
+++ b/wally/suits/itest.py
@@ -132,6 +132,12 @@
     def __init__(self, *dt, **mp):
         IPerfTest.__init__(self, *dt, **mp)
         self.config_fname = self.options['cfg']
+
+        if '/' not in self.config_fname and '.' not in self.config_fname:
+            cfgs_dir = os.path.dirname(io_agent.__file__)
+            self.config_fname = os.path.join(cfgs_dir,
+                                             self.config_fname + '.cfg')
+
         self.alive_check_interval = self.options.get('alive_check_interval')
         self.config_params = self.options.get('params', {})
         self.tool = self.options.get('tool', 'fio')