matplotlib charts, vm reuse, csv sensor output, better error handling
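
* report/charts: render io charts with matplotlib (svg) when it is
  available, fall back to the old renderer (png); the html templates get
  the image extension via the new img_ext parameter
* run_test: stop early when no test nodes are found; raise
  utils.StopTestError instead of generic exceptions; --reuse-vms now
  accepts any number of "prefix,ssh_url" pairs
* start_vms: find_vms() yields (ip, vm_id); vm count supports "=N"
  (total, minus already existing nodes); reuse free server groups
* sensors_utils: write sensor data as csv instead of repr()
* utils: new run_locally() helper with a timeout
* io configs: set randrepeat=0 (critical for multi-node runs), rework
  rrd.cfg and verify.cfg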
diff --git a/wally/charts.py b/wally/charts.py
index 828438c..b4472a4 100644
--- a/wally/charts.py
+++ b/wally/charts.py
@@ -122,8 +122,7 @@
bar.legend(*legend)
bar.scale(*scale)
- img_name = file_name + ".png"
- img_path = os.path.join(cfg_dict['charts_img_path'], img_name)
+ img_path = file_name + ".png"
if not os.path.exists(img_path):
bar.save(img_path)
diff --git a/wally/discover/node.py b/wally/discover/node.py
index 9161a21..a3d58f9 100644
--- a/wally/discover/node.py
+++ b/wally/discover/node.py
@@ -10,6 +10,7 @@
self.conn_url = conn_url
self.connection = None
self.monitor_ip = None
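+        # id of the openstack vm backing this node (None for non-cloud nodes)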
+ self.os_vm_id = None
def get_ip(self):
if self.conn_url == 'local':
diff --git a/wally/report.py b/wally/report.py
index 5b4c858..b334fa5 100644
--- a/wally/report.py
+++ b/wally/report.py
@@ -132,7 +132,7 @@
linearity_report = report('linearity', 'linearity_test')(linearity_report)
-def render_all_html(dest, info, lab_description, templ_name):
+def render_all_html(dest, info, lab_description, img_ext, templ_name):
very_root_dir = os.path.dirname(os.path.dirname(wally.__file__))
templ_dir = os.path.join(very_root_dir, 'report_templates')
templ_file = os.path.join(templ_dir, templ_name)
@@ -151,16 +151,19 @@
data['bw_write_max'] = (data['bw_write_max'][0] // 1024,
data['bw_write_max'][1])
- report = templ.format(lab_info=lab_description, **data)
+ report = templ.format(lab_info=lab_description, img_ext=img_ext,
+ **data)
open(dest, 'w').write(report)
-def render_hdd_html(dest, info, lab_description):
- render_all_html(dest, info, lab_description, "report_hdd.html")
+def render_hdd_html(dest, info, lab_description, img_ext):
+ render_all_html(dest, info, lab_description, img_ext,
+ "report_hdd.html")
-def render_ceph_html(dest, info, lab_description):
- render_all_html(dest, info, lab_description, "report_ceph.html")
+def render_ceph_html(dest, info, lab_description, img_ext):
+ render_all_html(dest, info, lab_description, img_ext,
+ "report_ceph.html")
def io_chart(title, concurence,
@@ -172,49 +175,90 @@
legend = [legend]
iops_or_bw_per_vm = []
- for i in range(len(concurence)):
- iops_or_bw_per_vm.append(iops_or_bw[i] / concurence[i])
+ for iops, conc in zip(iops_or_bw, concurence):
+ iops_or_bw_per_vm.append(iops / conc)
bar_dev_bottom = []
bar_dev_top = []
- for i in range(len(bar_data)):
- bar_dev_top.append(bar_data[i] + bar_dev[i])
- bar_dev_bottom.append(bar_data[i] - bar_dev[i])
+ for val, err in zip(bar_data, bar_dev):
+ bar_dev_top.append(val + err)
+ bar_dev_bottom.append(val - err)
- ch = charts.render_vertical_bar(title, legend, [bar_data], [bar_dev_top],
- [bar_dev_bottom], file_name=fname,
- scale_x=concurence, label_x="clients",
- label_y=legend[0],
- lines=[
- (latv, "msec", "rr", "lat"),
- # (latv_min, None, None, "lat_min"),
- # (latv_max, None, None, "lat_max"),
- (iops_or_bw_per_vm, None, None,
- legend[0] + " per client")
- ])
- return str(ch)
+ charts.render_vertical_bar(title, legend, [bar_data], [bar_dev_top],
+ [bar_dev_bottom], file_name=fname,
+ scale_x=concurence, label_x="clients",
+ label_y=legend[0],
+ lines=[
+ (latv, "msec", "rr", "lat"),
+ # (latv_min, None, None, "lat_min"),
+ # (latv_max, None, None, "lat_max"),
+ (iops_or_bw_per_vm, None, None,
+ legend[0] + " per client")
+ ])
-def make_hdd_plots(processed_results, path):
+def io_chart_mpl(title, concurence,
+ latv, latv_min, latv_max,
+ iops_or_bw, iops_or_bw_err,
+ legend, fname):
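+    # matplotlib flavour of io_chart: bars with error bars for iops/bw,
+    # latency min/avg/max as lines on a second axis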
+ points = " MiBps" if legend == 'BW' else ""
+ lc = len(concurence)
+ width = 0.35
+ xt = range(1, lc + 1)
+
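+    # per-client value = total iops/bw divided by concurrency at that point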
+ op_per_vm = [v / c for v, c in zip(iops_or_bw, concurence)]
+ fig, p1 = plt.subplots()
+ xpos = [i - width / 2 for i in xt]
+
+ p1.bar(xpos, iops_or_bw, width=width, yerr=iops_or_bw_err,
+ color='y',
+ label=legend)
+
+ p1.set_yscale('log')
+ p1.grid(True)
+ p1.plot(xt, op_per_vm, label=legend + " per vm")
+ p1.legend()
+
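+    # latency curves go on a twin y axis, also log-scaled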
+ p2 = p1.twinx()
+ p2.set_yscale('log')
+ p2.plot(xt, latv_max, label="latency max")
+ p2.plot(xt, latv, label="latency avg")
+ p2.plot(xt, latv_min, label="latency min")
+
+ plt.xlim(0.5, lc + 0.5)
+ plt.xticks(xt, map(str, concurence))
+ p1.set_xlabel("Threads")
+ p1.set_ylabel(legend + points)
+ p2.set_ylabel("Latency ms")
+ plt.title(title)
+ # plt.legend(, loc=2, borderaxespad=0.)
+ # plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
+ plt.legend(loc=2)
+ plt.savefig(fname, format=fname.split('.')[-1])
+
+
+def make_hdd_plots(processed_results, charts_dir):
plots = [
('hdd_test_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
('hdd_test_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS')
]
- make_plots(processed_results, path, plots)
+ return make_plots(processed_results, charts_dir, plots)
-def make_ceph_plots(processed_results, path):
+def make_ceph_plots(processed_results, charts_dir):
plots = [
('ceph_test_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
('ceph_test_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS'),
- ('ceph_test_rrd16m', 'rand_read_16m', 'Random read 16m direct MiBps'),
+ ('ceph_test_rrd16m', 'rand_read_16m',
+ 'Random read 16m direct MiBps'),
('ceph_test_rwd16m', 'rand_write_16m',
'Random write 16m direct MiBps'),
]
- make_plots(processed_results, path, plots)
+ return make_plots(processed_results, charts_dir, plots)
-def make_plots(processed_results, path, plots, max_lat=400000):
+def make_plots(processed_results, charts_dir, plots):
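+    # returns the extension of the generated images so the caller can
+    # tell the html templates which files to reference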
+ file_ext = None
for name_pref, fname, desc in plots:
chart_data = []
@@ -231,9 +275,8 @@
# if x.lat.average < max_lat]
lat = [x.lat.average / 1000 for x in chart_data]
-
- lat_min = [x.lat.min / 1000 for x in chart_data if x.lat.min < max_lat]
- lat_max = [x.lat.max / 1000 for x in chart_data if x.lat.max < max_lat]
+ lat_min = [x.lat.min / 1000 for x in chart_data]
+ lat_max = [x.lat.max / 1000 for x in chart_data]
vm_count = x.meta['testnodes_count']
concurence = [x.raw['concurence'] * vm_count for x in chart_data]
@@ -247,8 +290,16 @@
data_dev = [x.iops.confidence for x in chart_data]
name = "IOPS"
- io_chart(desc, concurence, lat, lat_min, lat_max,
- data, data_dev, name, fname)
+ fname = os.path.join(charts_dir, fname)
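+        # prefer matplotlib (svg) when it was imported successfully,
+        # otherwise fall back to the old chart renderer (png)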
+ if plt is not None:
+ io_chart_mpl(desc, concurence, lat, lat_min, lat_max,
+ data, data_dev, name, fname + '.svg')
+ file_ext = 'svg'
+ else:
+ io_chart(desc, concurence, lat, lat_min, lat_max,
+ data, data_dev, name, fname + '.png')
+ file_ext = 'png'
+ return file_ext
def find_max_where(processed_results, sync_mode, blocksize, rw, iops=True):
@@ -343,20 +394,20 @@
@report('HDD', 'hdd_test_rrd4k,hdd_test_rws4k')
-def make_hdd_report(processed_results, path, lab_info):
- make_hdd_plots(processed_results, path)
+def make_hdd_report(processed_results, path, charts_path, lab_info):
+ img_ext = make_hdd_plots(processed_results, charts_path)
di = get_disk_info(processed_results)
- render_hdd_html(path, di, lab_info)
+ render_hdd_html(path, di, lab_info, img_ext)
@report('Ceph', 'ceph_test')
-def make_ceph_report(processed_results, path, lab_info):
- make_ceph_plots(processed_results, path)
+def make_ceph_report(processed_results, path, charts_path, lab_info):
+ img_ext = make_ceph_plots(processed_results, charts_path)
di = get_disk_info(processed_results)
- render_ceph_html(path, di, lab_info)
+ render_ceph_html(path, di, lab_info, img_ext)
-def make_io_report(dinfo, results, path, lab_info=None):
+def make_io_report(dinfo, results, path, charts_path, lab_info=None):
lab_info = {
"total_disk": "None",
"total_memory": "None",
@@ -378,7 +429,7 @@
else:
hpath = path.format(name)
logger.debug("Generatins report " + name + " into " + hpath)
- func(dinfo, hpath, lab_info)
+ func(dinfo, hpath, charts_path, lab_info)
break
else:
logger.warning("No report generator found for this load")
diff --git a/wally/run_test.py b/wally/run_test.py
index 72ba4cf..dc6637f 100755
--- a/wally/run_test.py
+++ b/wally/run_test.py
@@ -9,8 +9,8 @@
import argparse
import functools
import threading
-import contextlib
import subprocess
+import contextlib
import collections
import yaml
@@ -152,6 +152,10 @@
test_nodes = [node for node in nodes
if 'testnode' in node.roles]
+ if len(test_nodes) == 0:
+ logger.error("No test nodes found")
+ return
+
test_number_per_type = {}
res_q = Queue.Queue()
@@ -257,7 +261,9 @@
if node.connection is None:
if 'testnode' in node.roles:
msg = "Can't connect to testnode {0}"
- raise RuntimeError(msg.format(node.get_conn_id()))
+ msg = msg.format(node.get_conn_id())
+ logger.error(msg)
+ raise utils.StopTestError(msg)
else:
msg = "Node {0} would be excluded - can't connect"
logger.warning(msg.format(node.get_conn_id()))
@@ -308,9 +314,12 @@
os_creds = get_OS_credentials(cfg, ctx, "clouds")
conn = start_vms.nova_connect(**os_creds)
- for ip in start_vms.find_vms(conn, vm_name_pattern):
+ for ip, vm_id in start_vms.find_vms(conn, vm_name_pattern):
node = Node(conn_pattern.format(ip=ip), ['testnode'])
+ node.os_vm_id = vm_id
ctx.nodes.append(node)
+ except utils.StopTestError:
+ raise
except Exception as exc:
msg = "Vm like {0} lookup failed".format(vm_name_pattern)
logger.exception(msg)
@@ -345,33 +354,27 @@
echo = 'echo "$OS_TENANT_NAME:$OS_USERNAME:$OS_PASSWORD@$OS_AUTH_URL"'
- p = subprocess.Popen(['/bin/bash'], shell=False,
- stdout=subprocess.PIPE,
- stdin=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- p.stdin.write(fc + "\n" + echo)
- p.stdin.close()
- code = p.wait()
- data = p.stdout.read().strip()
-
- if code != 0:
+ try:
+        data = utils.run_locally(['/bin/bash'],
+                                 input_data=fc + "\n" + echo)
+ except subprocess.CalledProcessError as exc:
msg = "Failed to get creads from openrc file: " + data
- logger.error(msg)
- raise RuntimeError(msg)
+ logger.exception(msg)
+ raise utils.StopTestError(msg, exc)
try:
user, tenant, passwd_auth_url = data.split(':', 2)
passwd, auth_url = passwd_auth_url.rsplit("@", 1)
assert (auth_url.startswith("https://") or
auth_url.startswith("http://"))
- except Exception:
+ except Exception as exc:
msg = "Failed to get creads from openrc file: " + data
logger.exception(msg)
- raise
+ raise utils.StopTestError(msg, exc)
else:
msg = "Creds {0!r} isn't supported".format(creds_type)
- raise ValueError(msg)
+ logger.error(msg)
+ raise utils.StopTestError(msg, None)
if creds is None:
creds = {'name': user,
@@ -385,7 +388,7 @@
@contextlib.contextmanager
-def create_vms_ctx(ctx, cfg, config):
+def create_vms_ctx(ctx, cfg, config, already_has_count=0):
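+    # already_has_count - number of suitable test nodes which already
+    # exist, so only the missing ones get spawned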
params = cfg['vm_configs'][config['cfg_name']].copy()
os_nodes_ids = []
@@ -398,11 +401,13 @@
params['keypair_file_private'] = params['keypair_name'] + ".pem"
params['group_name'] = cfg_dict['run_uuid']
- start_vms.prepare_os_subpr(params=params, **os_creds)
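+    # reused environments may skip openstack preparation entirely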
+ if not config.get('skip_preparation', False):
+ start_vms.prepare_os_subpr(params=params, **os_creds)
new_nodes = []
try:
- for new_node, node_id in start_vms.launch_vms(params):
+ for new_node, node_id in start_vms.launch_vms(params,
+ already_has_count):
new_node.roles.append('testnode')
ctx.nodes.append(new_node)
os_nodes_ids.append(node_id)
@@ -435,7 +440,12 @@
logger.error(msg)
raise utils.StopTestError(msg)
- with create_vms_ctx(ctx, cfg, config['openstack']) as new_nodes:
+ num_test_nodes = sum(1 for node in ctx.nodes
+ if 'testnode' in node.roles)
+
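+    # pass the number of existing test nodes so the "=N" count syntax
+    # only spawns the missing vms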
+ vm_ctx = create_vms_ctx(ctx, cfg, config['openstack'],
+ num_test_nodes)
+ with vm_ctx as new_nodes:
connect_all(new_nodes, True)
for node in new_nodes:
@@ -534,6 +544,7 @@
found = True
dinfo = report.process_disk_info(data)
report.make_io_report(dinfo, data, html_rep_fname,
+ cfg['charts_img_path'],
lab_info=ctx.hw_info)
text_rep_fname = cfg_dict['text_report_file']
@@ -599,7 +610,7 @@
help="Skip html report", default=False)
parser.add_argument("--params", metavar="testname.paramname",
help="Test params", default=[])
- parser.add_argument("--reuse-vms", default=None, metavar="vm_name_prefix")
+ parser.add_argument("--reuse-vms", default=[], nargs='*')
parser.add_argument("config_file")
return parser.parse_args(argv[1:])
@@ -618,8 +629,8 @@
discover_stage
]
- if opts.reuse_vms is not None:
- pref, ssh_templ = opts.reuse_vms.split(',', 1)
+ for reuse_param in opts.reuse_vms:
+ pref, ssh_templ = reuse_param.split(',', 1)
stages.append(reuse_vms_stage(pref, ssh_templ))
stages.extend([
diff --git a/wally/sensors_utils.py b/wally/sensors_utils.py
index 81a2832..9350349 100644
--- a/wally/sensors_utils.py
+++ b/wally/sensors_utils.py
@@ -1,3 +1,4 @@
+import csv
import time
import Queue
import logging
@@ -18,8 +19,11 @@
fd.write("\n")
observed_nodes = set()
+ fields_list_for_nodes = {}
+ required_keys = set(['time', 'source_id', 'hostname'])
try:
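+        # sensor samples are written as csv: a header row when a node is
+        # first seen, then one data row per sample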
+ csv_fd = csv.writer(fd)
while True:
val = data_q.get()
if val is None:
@@ -29,9 +33,20 @@
if addr not in observed_nodes:
mon_q.put(addr + (data['source_id'],))
observed_nodes.add(addr)
+ keys = set(data)
+ assert required_keys.issubset(keys)
+ keys -= required_keys
- fd.write(repr((addr, data)) + "\n")
+ fields_list_for_nodes[addr] = sorted(keys)
+ csv_fd.writerow([addr[0], addr[1],
+ data['source_id'], data['hostname']] +
+ fields_list_for_nodes[addr])
+ csv_fd.writerow([addr[0], addr[1]] +
+ map(data.__getitem__,
+ ['time'] + fields_list_for_nodes[addr]))
+
+ # fd.write(repr((addr, data)) + "\n")
# source_id = data.pop('source_id')
# rep_time = data.pop('time')
# if 'testnode' in source2roles_map.get(source_id, []):
diff --git a/wally/start_vms.py b/wally/start_vms.py
index 4e0698c..0bd2b9a 100644
--- a/wally/start_vms.py
+++ b/wally/start_vms.py
@@ -115,7 +115,7 @@
for ips in srv.addresses.values():
for ip in ips:
if ip.get("OS-EXT-IPS:type", None) == 'floating':
- yield ip['addr']
+ yield ip['addr'], srv.id
break
@@ -250,15 +250,21 @@
return [ip for ip in ip_list if ip.instance_id is None][:amount]
-def launch_vms(params):
+def launch_vms(params, already_has_count=0):
logger.debug("Starting new nodes on openstack")
count = params['count']
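+    # count may be an int, "xN" (N vms per compute) or "=N" (N vms total)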
+ lst = NOVA_CONNECTION.services.list(binary='nova-compute')
+ srv_count = len([srv for srv in lst if srv.status == 'enabled'])
if isinstance(count, basestring):
- assert count.startswith("x")
- lst = NOVA_CONNECTION.services.list(binary='nova-compute')
- srv_count = len([srv for srv in lst if srv.status == 'enabled'])
- count = srv_count * int(count[1:])
+ if count.startswith("x"):
+ count = srv_count * int(count[1:])
+ else:
+ assert count.startswith('=')
+ count = int(count[1:]) - already_has_count
+
+ if count <= 0:
+ return
assert isinstance(count, (int, long))
@@ -291,6 +297,13 @@
yield Node(conn_uri, []), os_node.id
+def get_free_server_groups(nova, template=None):
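+    # yield names of server groups which are still empty and whose name
+    # matches the template regex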
+ for g in nova.server_groups.list():
+ if g.members == []:
+ if re.match(template, g.name):
+ yield str(g.name)
+
+
def create_vms_mt(nova, amount, group_name, keypair_name, img_name,
flavor_name, vol_sz=None, network_zone_name=None,
flt_ip_pool=None, name_templ='wally-{id}',
@@ -336,14 +349,21 @@
orig_scheduler_hints = scheduler_hints.copy()
- for idx, (name, flt_ip) in enumerate(zip(names, ips)):
+
+ group_name_template = scheduler_hints['group'].format("\\d+")
+    groups = list(get_free_server_groups(nova, group_name_template + "$"))
+ groups.sort()
+
+ for idx, (name, flt_ip) in enumerate(zip(names, ips), 2):
scheduler_hints = None
if orig_scheduler_hints is not None and sec_group_size is not None:
if "group" in orig_scheduler_hints:
scheduler_hints = orig_scheduler_hints.copy()
- scheduler_hints['group'] = \
- scheduler_hints['group'].format(idx // sec_group_size)
+ scheduler_hints['group'] = groups[idx // sec_group_size]
if scheduler_hints is None:
scheduler_hints = orig_scheduler_hints.copy()
diff --git a/wally/suits/io/agent.py b/wally/suits/io/agent.py
index f6c3308..57ba229 100644
--- a/wally/suits/io/agent.py
+++ b/wally/suits/io/agent.py
@@ -293,15 +293,24 @@
def slice_config(sec_iter, runcycle=None, max_jobs=1000,
- soft_runcycle=None):
+ soft_runcycle=None, split_on_names=False):
jcount = 0
runtime = 0
curr_slice = []
prev_name = None
for pos, sec in enumerate(sec_iter):
- if soft_runcycle is not None and prev_name != sec.name:
- if runtime > soft_runcycle:
+
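+        # decide whether the accumulated slice must be flushed before
+        # this section starts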
+ if prev_name is not None:
+ split_here = False
+
+ if soft_runcycle is not None and prev_name != sec.name:
+ split_here = (runtime > soft_runcycle)
+
+ if split_on_names and prev_name != sec.name:
+ split_here = True
+
+ if split_here:
yield curr_slice
curr_slice = []
runtime = 0
diff --git a/wally/suits/io/ceph.cfg b/wally/suits/io/ceph.cfg
index 5593181..a10adfb 100644
--- a/wally/suits/io/ceph.cfg
+++ b/wally/suits/io/ceph.cfg
@@ -1,12 +1,16 @@
[defaults]
-wait_for_previous
-group_reporting
-time_based
+wait_for_previous=1
+group_reporting=1
+time_based=1
buffered=0
iodepth=1
softrandommap=1
filename={FILENAME}
NUM_ROUNDS=7
+thread=1
+
+# this is critical for correct results in multi-node runs
+randrepeat=0
NUMJOBS={% 1, 5, 10, 15, 40 %}
NUMJOBS_SHORT={% 1, 2, 3, 10 %}
diff --git a/wally/suits/io/rrd.cfg b/wally/suits/io/rrd.cfg
index 5593181..3f4c074 100644
--- a/wally/suits/io/rrd.cfg
+++ b/wally/suits/io/rrd.cfg
@@ -6,50 +6,19 @@
iodepth=1
softrandommap=1
filename={FILENAME}
-NUM_ROUNDS=7
-
-NUMJOBS={% 1, 5, 10, 15, 40 %}
-NUMJOBS_SHORT={% 1, 2, 3, 10 %}
+NUM_ROUNDS=35
size=30G
ramp_time=15
runtime=60
# ---------------------------------------------------------------------
-# check different thread count, sync mode. (latency, iops) = func(th_count)
-# ---------------------------------------------------------------------
-[ceph_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize=4k
-rw=randwrite
-sync=1
-numjobs={NUMJOBS}
-
-# ---------------------------------------------------------------------
-# direct write
-# ---------------------------------------------------------------------
-[ceph_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize=4k
-rw=randwrite
-direct=1
-numjobs=1
-
-# ---------------------------------------------------------------------
# check different thread count, direct read mode. (latency, iops) = func(th_count)
# also check iops for randread
# ---------------------------------------------------------------------
-[ceph_test_{TEST_SUMM} * {NUM_ROUNDS}]
+[rrd_test_{TEST_SUMM} * {NUM_ROUNDS}]
blocksize=4k
rw=randread
direct=1
-numjobs={NUMJOBS}
-
-# ---------------------------------------------------------------------
-# this is essentially sequential write/read operations
-# we can't use sequential with numjobs > 1 due to caching and block merging
-# ---------------------------------------------------------------------
-[ceph_test_{TEST_SUMM} * {NUM_ROUNDS}]
-blocksize=16m
-rw={% randread, randwrite %}
-direct=1
-numjobs={NUMJOBS_SHORT}
+numjobs=5
diff --git a/wally/suits/io/verify.cfg b/wally/suits/io/verify.cfg
index 4a66aac..58b8450 100644
--- a/wally/suits/io/verify.cfg
+++ b/wally/suits/io/verify.cfg
@@ -8,31 +8,21 @@
filename={FILENAME}
NUM_ROUNDS=1
-size=5G
+# this is critical for correct results in multi-node runs
+randrepeat=0
+
+size=50G
ramp_time=5
-runtime=360
+runtime=60
# ---------------------------------------------------------------------
-# check different thread count, sync mode. (latency, iops) = func(th_count)
+[verify_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize=4k
+rw=randwrite
+direct=1
+
# ---------------------------------------------------------------------
-[verify_{TEST_SUMM}]
-blocksize=4m
+[verify_{TEST_SUMM} * {NUM_ROUNDS}]
+blocksize=4k
rw=randread
direct=1
-numjobs=5
-
-# ---------------------------------------------------------------------
-# check different thread count, sync mode. (latency, iops) = func(th_count)
-# ---------------------------------------------------------------------
-# [verify_{TEST_SUMM}]
-# blocksize=4k
-# rw=randwrite
-# direct=1
-
-# ---------------------------------------------------------------------
-# direct write
-# ---------------------------------------------------------------------
-# [verify_{TEST_SUMM}]
-# blocksize=4k
-# rw=randread
-# direct=1
diff --git a/wally/suits/itest.py b/wally/suits/itest.py
index f0a1e8d..dd52f33 100644
--- a/wally/suits/itest.py
+++ b/wally/suits/itest.py
@@ -189,7 +189,8 @@
self.fio_configs = io_agent.parse_and_slice_all_in_1(
self.raw_cfg,
self.config_params,
- soft_runcycle=soft_runcycle)
+ soft_runcycle=soft_runcycle,
+ split_on_names=self.test_logging)
self.fio_configs = list(self.fio_configs)
splitter = "\n\n" + "-" * 60 + "\n\n"
diff --git a/wally/utils.py b/wally/utils.py
index d5d6f48..3792ba4 100644
--- a/wally/utils.py
+++ b/wally/utils.py
@@ -1,5 +1,6 @@
import re
import os
+import time
import socket
import logging
import threading
@@ -129,6 +130,28 @@
return "{0}{1}i".format(size // scale, name)
+def run_locally(cmd, input_data="", timeout=20):
+    shell = isinstance(cmd, basestring)
+
+    proc = subprocess.Popen(cmd,
+                            shell=shell,
+                            stdin=subprocess.PIPE,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+
+    # feed the input right away and close stdin, so the child can't
+    # block forever waiting for it
+    proc.stdin.write(input_data)
+    proc.stdin.close()
+
+    end_time = time.time() + timeout
+
+    while proc.poll() is None:
+        if time.time() > end_time:
+            proc.kill()
+            raise subprocess.CalledProcessError(1, cmd, "Timeout")
+        time.sleep(1)
+
+    # output is read only after exit - ok for the small outputs expected here
+    out = proc.stdout.read()
+    err = proc.stderr.read()
+
+    if proc.returncode != 0:
+        raise subprocess.CalledProcessError(proc.returncode, cmd, out + err)
+
+    return out
+
+
def get_ip_for_target(target_ip):
if not is_ip(target_ip):
target_ip = socket.gethostbyname(target_ip)
@@ -137,8 +160,7 @@
if first_dig == 127:
return '127.0.0.1'
- cmd = 'ip route get to'.split(" ") + [target_ip]
- data = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout.read()
+ data = run_locally('ip route get to'.split(" ") + [target_ip])
rr1 = r'{0} via [.0-9]+ dev (?P<dev>.*?) src (?P<ip>[.0-9]+)$'
rr1 = rr1.replace(" ", r'\s+')