Improve OS preparation and restructure the test-run config
diff --git a/wally/discover/discover.py b/wally/discover/discover.py
index 73407c0..3cab884 100644
--- a/wally/discover/discover.py
+++ b/wally/discover/discover.py
@@ -57,7 +57,10 @@
cluster_info)
nodes_to_run.extend(os_nodes)
- elif cluster == "fuel":
+ elif cluster == "fuel" or cluster == "fuel_openrc_only":
+ if cluster == "fuel_openrc_only":
+ discover_nodes = False
+
res = fuel.discover_fuel_nodes(clusters_info['fuel'],
var_dir,
discover_nodes)
diff --git a/wally/fuel_rest_api.py b/wally/fuel_rest_api.py
index 737bf2e..0567e71 100644
--- a/wally/fuel_rest_api.py
+++ b/wally/fuel_rest_api.py
@@ -287,6 +287,7 @@
if net['name'] == network:
iface_name = net['dev']
for iface in self.get_info()['meta']['interfaces']:
+            logger.debug("iface=%r ip=%r", iface, net['ip'])
if iface['name'] == iface_name:
try:
return iface['ip']
diff --git a/wally/report.py b/wally/report.py
index 209fa31..2ab4664 100644
--- a/wally/report.py
+++ b/wally/report.py
@@ -88,10 +88,9 @@
# marker += 1
# plt.legend(loc=2)
# plt.title("Linearity test by %i dots" % (len(vals)))
-
-
# if plt:
-# linearity_report = report('linearity', 'linearity_test')(linearity_report)
+# linearity_report = report('linearity',
+# 'linearity_test')(linearity_report)
def render_hdd_html(dest, info, lab_description):
@@ -100,11 +99,12 @@
templ_file = os.path.join(templ_dir, "report_hdd.html")
templ = open(templ_file, 'r').read()
- for name in info.__dict__:
+ for name, val in info.__dict__.items():
if not name.startswith('__'):
- if info.__dict__[name] == "-":
- continue
- info.__dict__[name] = round_3_digit(info.__dict__[name])
+ if val is None:
+ info.__dict__[name] = '-'
+ else:
+ info.__dict__[name] = round_3_digit(val)
report = templ.format(lab_info=lab_description, **info.__dict__)
open(dest, 'w').write(report)
@@ -265,9 +265,9 @@
hdi = DiskInfo()
hdi.direct_iops_r_max = di.direct_iops_r_max
hdi.direct_iops_w_max = di.direct_iops_w_max
- hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else '-'
- hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else '-'
- hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else '-'
+ hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else None
+ hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else None
+ hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else None
hdi.bw_write_max = di.bw_write_max
hdi.bw_read_max = di.bw_read_max
return hdi
diff --git a/wally/run_test.py b/wally/run_test.py
index fbe676f..6606d04 100755
--- a/wally/run_test.py
+++ b/wally/run_test.py
@@ -326,7 +326,7 @@
@contextlib.contextmanager
def create_vms_ctx(ctx, cfg, config):
- params = config['vm_params'].copy()
+ params = cfg['vm_configs'][config['cfg_name']].copy()
os_nodes_ids = []
os_creds_type = config['creds']
@@ -335,11 +335,14 @@
start_vms.nova_connect(**os_creds)
logger.info("Preparing openstack")
- start_vms.prepare_os_subpr(**os_creds)
+ params.update(config)
+ params['keypair_file_private'] = params['keypair_name'] + ".pem"
+ params['group_name'] = cfg_dict['run_uuid']
+
+ start_vms.prepare_os_subpr(params=params, **os_creds)
new_nodes = []
try:
- params['group_name'] = cfg_dict['run_uuid']
for new_node, node_id in start_vms.launch_vms(params):
new_node.roles.append('testnode')
ctx.nodes.append(new_node)
@@ -368,7 +371,12 @@
key, config = group.items()[0]
if 'start_test_nodes' == key:
- with create_vms_ctx(ctx, cfg, config) as new_nodes:
+ if 'openstack' not in config:
+ msg = "No openstack block in config - can't spawn vm's"
+ logger.error(msg)
+ raise utils.StopTestError(msg)
+
+ with create_vms_ctx(ctx, cfg, config['openstack']) as new_nodes:
connect_all(new_nodes, True)
for node in new_nodes:
@@ -584,13 +592,17 @@
if cfg_dict.get('run_web_ui', False):
start_web_ui(cfg_dict, ctx)
+ msg_templ = "Exception during {0.__name__}: {1!s}"
+ msg_templ_no_exc = "During {0.__name__}"
+
try:
for stage in stages:
logger.info("Start {0.__name__} stage".format(stage))
stage(cfg_dict, ctx)
- except Exception as exc:
- msg = "Exception during {0.__name__}: {1!s}".format(stage, exc)
- logger.error(msg)
+ except utils.StopTestError as exc:
+ logger.error(msg_templ.format(stage, exc))
+ except Exception:
+ logger.exception(msg_templ_no_exc.format(stage))
finally:
exc, cls, tb = sys.exc_info()
for stage in ctx.clear_calls_stack[::-1]:
@@ -598,13 +610,9 @@
logger.info("Start {0.__name__} stage".format(stage))
stage(cfg_dict, ctx)
except utils.StopTestError as exc:
- msg = "During {0.__name__} stage: {1}".format(stage, exc)
- logger.error(msg)
- except Exception as exc:
- logger.exception("During {0.__name__} stage".format(stage))
-
- # if exc is not None:
- # raise exc, cls, tb
+ logger.error(msg_templ.format(stage, exc))
+ except Exception:
+ logger.exception(msg_templ_no_exc.format(stage))
if exc is None:
for report_stage in report_stages:
diff --git a/wally/start_vms.py b/wally/start_vms.py
index e3b9245..af7df71 100644
--- a/wally/start_vms.py
+++ b/wally/start_vms.py
@@ -62,46 +62,59 @@
return CINDER_CONNECTION
-def nova_disconnect():
- global NOVA_CONNECTION
- if NOVA_CONNECTION is not None:
- NOVA_CONNECTION.close()
- NOVA_CONNECTION = None
-
-
-def prepare_os_subpr(name=None, passwd=None, tenant=None, auth_url=None):
+def prepare_os_subpr(params, name=None, passwd=None, tenant=None,
+ auth_url=None):
if name is None:
name, passwd, tenant, auth_url = ostack_get_creds()
- params = {
- 'OS_USERNAME': name,
- 'OS_PASSWORD': passwd,
- 'OS_TENANT_NAME': tenant,
- 'OS_AUTH_URL': auth_url
- }
+ MAX_VM_PER_NODE = 8
+ serv_groups = " ".join(map(params['aa_group_name'].format,
+ range(MAX_VM_PER_NODE)))
- params_s = " ".join("{0}={1}".format(k, v) for k, v in params.items())
+ env = os.environ.copy()
+ env.update(dict(
+ OS_USERNAME=name,
+ OS_PASSWORD=passwd,
+ OS_TENANT_NAME=tenant,
+ OS_AUTH_URL=auth_url,
- spath = os.path.dirname(wally.__file__)
- spath = os.path.dirname(spath)
+ FLAVOR_NAME=params['flavor']['name'],
+ FLAVOR_RAM=str(params['flavor']['ram_size']),
+ FLAVOR_HDD=str(params['flavor']['hdd_size']),
+ FLAVOR_CPU_COUNT=str(params['flavor']['cpu_count']),
+
+ SERV_GROUPS=serv_groups,
+ KEYPAIR_NAME=params['keypair_name'],
+
+ SECGROUP=params['security_group'],
+
+ IMAGE_NAME=params['image']['name'],
+ KEY_FILE_NAME=params['keypair_file_private'],
+ IMAGE_URL=params['image']['url'],
+ ))
+
+ spath = os.path.dirname(os.path.dirname(wally.__file__))
spath = os.path.join(spath, 'scripts/prepare.sh')
- cmd_templ = "env {params} bash {spath} >/dev/null"
- cmd = cmd_templ.format(params=params_s, spath=spath)
- subprocess.call(cmd, shell=True)
+ cmd = "bash {spath} >/dev/null".format(spath=spath)
+ subprocess.check_call(cmd, shell=True, env=env)
def prepare_os(nova, params):
allow_ssh(nova, params['security_group'])
+ MAX_VM_PER_NODE = 8
+    serv_groups = [params['aa_group_name'].format(idx)
+                   for idx in range(MAX_VM_PER_NODE)]
+
shed_ids = []
- for shed_group in params['schedulers_groups']:
+ for shed_group in serv_groups:
shed_ids.append(get_or_create_aa_group(nova, shed_group))
create_keypair(nova,
params['keypair_name'],
- params['pub_key_path'],
- params['priv_key_path'])
+ params['keypair_name'] + ".pub",
+ params['keypair_name'] + ".pem")
create_image(nova, params['image']['name'],
params['image']['url'])
@@ -144,13 +157,14 @@
pass
-def create_flavor(nova, name, **params):
+def create_flavor(nova, name, ram_size, hdd_size, cpu_count):
pass
def create_keypair(nova, name, pub_key_path, priv_key_path):
try:
nova.keypairs.find(name=name)
+    # TODO: if the key files are missing on disk, delete the keypair and recreate it
except NotFound:
if os.path.exists(pub_key_path):
with open(pub_key_path) as pub_key_fd:
@@ -167,9 +181,6 @@
def create_volume(size, name):
cinder = cinder_connect()
- # vol_id = "2974f227-8755-4333-bcae-cd9693cd5d04"
- # logger.warning("Reusing volume {0}".format(vol_id))
- # vol = cinder.volumes.get(vol_id)
vol = cinder.volumes.create(size=size, display_name=name)
err_count = 0
@@ -222,8 +233,8 @@
def launch_vms(params):
logger.debug("Starting new nodes on openstack")
- params = params.copy()
- count = params.pop('count')
+ count = params['count']
+ srv_count = None  # stays None unless count is "xN"; passed as sec_group_size below
if isinstance(count, basestring):
assert count.startswith("x")
@@ -231,30 +241,42 @@
srv_count = len([srv for srv in lst if srv.status == 'enabled'])
count = srv_count * int(count[1:])
+ assert isinstance(count, (int, long))
+
srv_params = "img: {image[name]}, flavor: {flavor[name]}".format(**params)
msg_templ = "Will start {0} servers with next params: {1}"
logger.info(msg_templ.format(count, srv_params))
- vm_creds = params.pop('creds')
- params = params.copy()
+ vm_params = dict(
+ img_name=params['image']['name'],
+ flavor_name=params['flavor']['name'],
+ group_name=params['group_name'],
+ keypair_name=params['keypair_name'],
+ vol_sz=params.get('vol_sz'),
+ network_zone_name=params.get("network_zone_name"),
+ flt_ip_pool=params.get('flt_ip_pool'),
+ name_templ=params.get('name_templ'),
+ scheduler_hints={"group": params['aa_group_name']},
+ security_group=params['security_group'],
+ sec_group_size=srv_count
+ )
- params['img_name'] = params['image']['name']
- params['flavor_name'] = params['flavor']['name']
+ # precache all errors before start creating vms
+ private_key_path = params['keypair_file_private']
+ creds = params['image']['creds']
+ creds.format(ip="1.1.1.1", private_key_path="/some_path/xx")
- del params['image']
- del params['flavor']
- del params['scheduler_group_name']
- private_key_path = params.pop('private_key_path')
+ for ip, os_node in create_vms_mt(NOVA_CONNECTION, count, **vm_params):
- for ip, os_node in create_vms_mt(NOVA_CONNECTION, count, **params):
- conn_uri = vm_creds.format(ip=ip, private_key_path=private_key_path)
+ conn_uri = creds.format(ip=ip, private_key_path=private_key_path)
yield Node(conn_uri, []), os_node.id
def create_vms_mt(nova, amount, group_name, keypair_name, img_name,
flavor_name, vol_sz=None, network_zone_name=None,
flt_ip_pool=None, name_templ='wally-{id}',
- scheduler_hints=None, security_group=None):
+ scheduler_hints=None, security_group=None,
+ sec_group_size=None):
with ThreadPoolExecutor(max_workers=16) as executor:
if network_zone_name is not None:
@@ -293,7 +315,20 @@
futures = []
logger.debug("Requesting new vm's")
- for name, flt_ip in zip(names, ips):
+    orig_scheduler_hints = scheduler_hints.copy() if scheduler_hints else None
+
+ for idx, (name, flt_ip) in enumerate(zip(names, ips)):
+
+ scheduler_hints = None
+ if orig_scheduler_hints is not None and sec_group_size is not None:
+ if "group" in orig_scheduler_hints:
+ scheduler_hints = orig_scheduler_hints.copy()
+ scheduler_hints['group'] = \
+ scheduler_hints['group'].format(idx // sec_group_size)
+
+        if scheduler_hints is None and orig_scheduler_hints is not None:
+            scheduler_hints = orig_scheduler_hints.copy()
+
params = (nova, name, keypair_name, img, fl,
nics, vol_sz, flt_ip, scheduler_hints,
flt_ip_pool, [security_group])
diff --git a/wally/suits/itest.py b/wally/suits/itest.py
index 605df2c..bd98dee 100644
--- a/wally/suits/itest.py
+++ b/wally/suits/itest.py
@@ -132,6 +132,12 @@
def __init__(self, *dt, **mp):
IPerfTest.__init__(self, *dt, **mp)
self.config_fname = self.options['cfg']
+
+ if '/' not in self.config_fname and '.' not in self.config_fname:
+ cfgs_dir = os.path.dirname(io_agent.__file__)
+ self.config_fname = os.path.join(cfgs_dir,
+ self.config_fname + '.cfg')
+
self.alive_check_interval = self.options.get('alive_check_interval')
self.config_params = self.options.get('params', {})
self.tool = self.options.get('tool', 'fio')