import re
import time
import json
import stat
import random
import shutil
import os.path
import logging
import datetime
import functools
import subprocess
import collections

import yaml
import paramiko
import texttable
from paramiko.ssh_exception import SSHException
from concurrent.futures import ThreadPoolExecutor, wait

import wally
from wally.pretty_yaml import dumps
from wally.statistic import round_3_digit, data_property, average
from wally.utils import ssize2b, sec_to_str, StopTestError, Barrier, get_os
from wally.ssh_utils import (save_to_remote, read_from_remote, BGSSHTask, reconnect)

from .fio_task_parser import (execution_time, fio_cfg_compile,
                              get_test_summary, get_test_summary_tuple,
                              get_test_sync_mode, FioJobSection)

from ..itest import (TimeSeriesValue, PerfTest, TestResults,
                     run_on_node, TestConfig, MeasurementMatrix)

logger = logging.getLogger("wally")


# Results folder structure
# results/
#     {loadtype}_{num}/
#         config.yaml
#         ......


class NoData(object):
    pass


def cached_prop(func):
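    """
    Cache a computed property: the wrapped getter is evaluated once and the
    result is stored in the matching "_<name>" attribute, which is expected
    to be pre-initialized to NoData.
    """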
    @property
    @functools.wraps(func)
    def closure(self):
        val = getattr(self, "_" + func.__name__)
        if val is NoData:
            val = func(self)
            setattr(self, "_" + func.__name__, val)
        return val
    return closure


def load_fio_log_file(fname):
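    """
    Parse a fio per-interval log (comma-separated "offset, value" pairs) into
    a TimeSeriesValue. Offsets are divided by 1000 (us to ms, per the inline
    comment) and 0.5 is added to every value to compensate for fio truncating
    logged averages to integers.
    """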
    with open(fname) as fd:
        it = [ln.split(',')[:2] for ln in fd]

    vals = [(float(off) / 1000,  # convert us to ms
             float(val.strip()) + 0.5)  # add 0.5 to compensate the average value,
            # as fio trims all values in the log to integers
            for off, val in it]

    return TimeSeriesValue(vals)


READ_IOPS_DISCSTAT_POS = 3
WRITE_IOPS_DISCSTAT_POS = 7


def load_sys_log_file(ftype, fname):
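    """
    Parse a periodically sampled disk-statistics log (one /proc/diskstats-like
    line per sample, assumed format) into per-interval IOPS deltas and wrap
    them in a TimeSeriesValue.
    """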
    assert ftype == 'iops'
    pval = None
    with open(fname) as fd:
        iops = []
        for ln in fd:
            params = ln.split()
            cval = int(params[WRITE_IOPS_DISCSTAT_POS]) + \
                int(params[READ_IOPS_DISCSTAT_POS])
            if pval is not None:
                iops.append(cval - pval)
            pval = cval

    vals = [(idx * 1000, val) for idx, val in enumerate(iops)]
    return TimeSeriesValue(vals)


def load_test_results(folder, run_num):
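    """
    Reload a single test run (run_num) from a results folder: per-connection
    fio log files, optional *.sys.log disk-stat logs, the raw fio JSON output
    and the saved {run_num}_params.yaml, assembled into a FioRunResult.
    """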
    res = {}
    params = None

    fn = os.path.join(folder, str(run_num) + '_params.yaml')
    params = yaml.load(open(fn).read())

    conn_ids_set = set()
    rr = r"{0}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.\d+\.log$".format(run_num)
    for fname in os.listdir(folder):
        rm = re.match(rr, fname)
        if rm is None:
            continue

        conn_id_s = rm.group('conn_id')
        conn_id = conn_id_s.replace('_', ':')
        ftype = rm.group('type')

        if ftype not in ('iops', 'bw', 'lat'):
            continue

        ts = load_fio_log_file(os.path.join(folder, fname))
        res.setdefault(ftype, {}).setdefault(conn_id, []).append(ts)

        conn_ids_set.add(conn_id)

    rr = r"{0}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.sys\.log$".format(run_num)
    for fname in os.listdir(folder):
        rm = re.match(rr, fname)
        if rm is None:
            continue

        conn_id_s = rm.group('conn_id')
        conn_id = conn_id_s.replace('_', ':')
        ftype = rm.group('type')

        if ftype not in ('iops', 'bw', 'lat'):
            continue

        ts = load_sys_log_file(ftype, os.path.join(folder, fname))
        res.setdefault(ftype + ":sys", {}).setdefault(conn_id, []).append(ts)

        conn_ids_set.add(conn_id)

    mm_res = {}

    if len(res) == 0:
        raise ValueError("No data was found")

    for key, data in res.items():
        conn_ids = sorted(conn_ids_set)
        awail_ids = [conn_id for conn_id in conn_ids if conn_id in data]
        matr = [data[conn_id] for conn_id in awail_ids]
        mm_res[key] = MeasurementMatrix(matr, awail_ids)

    raw_res = {}
    for conn_id in conn_ids:
        fn = os.path.join(folder, "{0}_{1}_rawres.json".format(run_num, conn_id_s))

        # remove message hack
        fc = "{" + open(fn).read().split('{', 1)[1]
        raw_res[conn_id] = json.loads(fc)

    fio_task = FioJobSection(params['name'])
    fio_task.vals.update(params['vals'])

    config = TestConfig('io', params, None, params['nodes'], folder, None)
    return FioRunResult(config, fio_task, mm_res, raw_res, params['intervals'], run_num)


class Attrmapper(object):
    def __init__(self, dct):
        self.__dct = dct

    def __getattr__(self, name):
        try:
            return self.__dct[name]
        except KeyError:
            raise AttributeError(name)


class DiskPerfInfo(object):
    def __init__(self, name, summary, params, testnodes_count):
        self.name = name
        self.bw = None
        self.iops = None
        self.lat = None
        self.lat_50 = None
        self.lat_95 = None
        self.lat_avg = None

        self.raw_bw = []
        self.raw_iops = []
        self.raw_lat = []

        self.params = params
        self.testnodes_count = testnodes_count
        self.summary = summary
        self.p = Attrmapper(self.params['vals'])

        self.sync_mode = get_test_sync_mode(self.params['vals'])
        self.concurence = self.params['vals'].get('numjobs', 1)


def get_lat_perc_50_95(lat_mks):
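    """
    Given a {latency_us_bucket: share} histogram, return the (50th, 95th)
    percentile latencies in milliseconds, linearly interpolating inside a
    bucket where possible.
    """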
    curr_perc = 0
    perc_50 = None
    perc_95 = None
    pkey = None
    for key, val in sorted(lat_mks.items()):
        if curr_perc + val >= 50 and perc_50 is None:
            if pkey is None or val < 1.:
                perc_50 = key
            else:
                perc_50 = (50. - curr_perc) / val * (key - pkey) + pkey

        if curr_perc + val >= 95:
            if pkey is None or val < 1.:
                perc_95 = key
            else:
                perc_95 = (95. - curr_perc) / val * (key - pkey) + pkey
            break

        pkey = key
        curr_perc += val

    # for k, v in sorted(lat_mks.items()):
    #     if k / 1000 > 0:
    #         print "{0:>4}".format(k / 1000), v

    # print perc_50 / 1000., perc_95 / 1000.
    # exit(1)
    return perc_50 / 1000., perc_95 / 1000.


class IOTestResults(object):
    def __init__(self, suite_name, fio_results, log_directory):
        self.suite_name = suite_name
        self.fio_results = fio_results
        self.log_directory = log_directory

    def __iter__(self):
        return iter(self.fio_results)

    def __len__(self):
        return len(self.fio_results)

    def get_yamable(self):
        items = [(fio_res.summary(), fio_res.idx) for fio_res in self]
        return {self.suite_name: [self.log_directory] + items}


class FioRunResult(TestResults):
    """
    Fio run results
    config: TestConfig
    fio_task: FioJobSection
    ts_results: {str: MeasurementMatrix[TimeSeriesValue]}
    raw_result: ????
    run_interval: (float, float) - test run time, used for sensors
    """
    def __init__(self, config, fio_task, ts_results, raw_result, run_interval, idx):

        self.name = fio_task.name.rsplit("_", 1)[0]
        self.fio_task = fio_task
        self.idx = idx

        self.bw = ts_results['bw']
        self.lat = ts_results['lat']
        self.iops = ts_results['iops']

        if 'iops:sys' in ts_results:
            self.iops_sys = ts_results['iops:sys']
        else:
            self.iops_sys = None

        res = {"bw": self.bw,
               "lat": self.lat,
               "iops": self.iops,
               "iops:sys": self.iops_sys}

        self.sensors_data = None
        self._pinfo = None
        TestResults.__init__(self, config, res, raw_result, run_interval)

    def get_params_from_fio_report(self):
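        """
        Pull per-node iops/bw numbers out of the raw fio JSON report, both as
        fio-reported averages and as values recomputed from total_ios/io_bytes
        over the runtime.
        """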
        nodes = self.bw.connections_ids

        iops = [self.raw_result[node]['jobs'][0]['mixed']['iops'] for node in nodes]
        total_ios = [self.raw_result[node]['jobs'][0]['mixed']['total_ios'] for node in nodes]
        runtime = [self.raw_result[node]['jobs'][0]['mixed']['runtime'] / 1000 for node in nodes]
        flt_iops = [float(ios) / rtime for ios, rtime in zip(total_ios, runtime)]

        bw = [self.raw_result[node]['jobs'][0]['mixed']['bw'] for node in nodes]
        total_bytes = [self.raw_result[node]['jobs'][0]['mixed']['io_bytes'] for node in nodes]
        flt_bw = [float(tbytes) / rtime for tbytes, rtime in zip(total_bytes, runtime)]

        return {'iops': iops,
                'flt_iops': flt_iops,
                'bw': bw,
                'flt_bw': flt_bw}

    def summary(self):
        return get_test_summary(self.fio_task, len(self.config.nodes))

    def summary_tpl(self):
        return get_test_summary_tuple(self.fio_task, len(self.config.nodes))

    def get_lat_perc_50_95_multy(self):
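        """
        Merge the per-job fio latency histograms (latency_ms/latency_us) from
        every node into one histogram and return its 50th/95th percentiles.
        """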
        lat_mks = collections.defaultdict(lambda: 0)
        num_res = 0

        for result in self.raw_result.values():
            num_res += len(result['jobs'])
            for job_info in result['jobs']:
                for k, v in job_info['latency_ms'].items():
                    if isinstance(k, basestring) and k.startswith('>='):
                        lat_mks[int(k[2:]) * 1000] += v
                    else:
                        lat_mks[int(k) * 1000] += v

                for k, v in job_info['latency_us'].items():
                    lat_mks[int(k)] += v

        for k, v in lat_mks.items():
            lat_mks[k] = float(v) / num_res
        return get_lat_perc_50_95(lat_mks)

    def disk_perf_info(self, avg_interval=2.0):
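        """
        Build (and cache) a DiskPerfInfo with aggregated iops/bw/latency:
        per-VM time series are averaged over avg_interval, summed across VMs
        and rescaled so that their averages match the fio-reported totals.
        """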
        if self._pinfo is not None:
            return self._pinfo

        testnodes_count = len(self.config.nodes)

        pinfo = DiskPerfInfo(self.name,
                             self.summary(),
                             self.params,
                             testnodes_count)

        def prepare(data, drop=1):
            if data is None:
                return data

            res = []
            for ts_data in data:
                if ts_data.average_interval() < avg_interval:
                    ts_data = ts_data.derived(avg_interval)

                # drop the last values on the bounds,
                # as they may contain ranges without activity
                assert len(ts_data.values) >= drop + 1, str(drop) + " " + str(ts_data.values)

                if drop > 0:
                    res.append(ts_data.values[:-drop])
                else:
                    res.append(ts_data.values)

            return res

        def agg_data(matr):
            arr = sum(matr, [])
            min_len = min(map(len, arr))
            res = []
            for idx in range(min_len):
                res.append(sum(dt[idx] for dt in arr))
            return res

        pinfo.raw_lat = map(prepare, self.lat.per_vm())
        num_th = sum(map(len, pinfo.raw_lat))
        lat_avg = [val / num_th for val in agg_data(pinfo.raw_lat)]
        pinfo.lat_avg = data_property(lat_avg).average / 1000  # us to ms

        pinfo.lat_50, pinfo.lat_95 = self.get_lat_perc_50_95_multy()
        pinfo.lat = pinfo.lat_50

        pinfo.raw_bw = map(prepare, self.bw.per_vm())
        pinfo.raw_iops = map(prepare, self.iops.per_vm())

        if self.iops_sys is not None:
            pinfo.raw_iops_sys = map(prepare, self.iops_sys.per_vm())
            pinfo.iops_sys = data_property(agg_data(pinfo.raw_iops_sys))
        else:
            pinfo.raw_iops_sys = None
            pinfo.iops_sys = None

        fparams = self.get_params_from_fio_report()
        fio_report_bw = sum(fparams['flt_bw'])
        fio_report_iops = sum(fparams['flt_iops'])

        agg_bw = agg_data(pinfo.raw_bw)
        agg_iops = agg_data(pinfo.raw_iops)

        log_bw_avg = average(agg_bw)
        log_iops_avg = average(agg_iops)

        # update values to match the averages from the fio report
        coef_iops = fio_report_iops / float(log_iops_avg)
        coef_bw = fio_report_bw / float(log_bw_avg)

        bw_log = data_property([val * coef_bw for val in agg_bw])
        iops_log = data_property([val * coef_iops for val in agg_iops])

        bw_report = data_property([fio_report_bw])
        iops_report = data_property([fio_report_iops])

        # when IOPS/BW per thread is too low, the log data is rounded too
        # coarsely, so prefer the values from the fio report in that case
        iops_per_th = sum(sum(pinfo.raw_iops, []), [])
        if average(iops_per_th) > 10:
            pinfo.iops = iops_log
            pinfo.iops2 = iops_report
        else:
            pinfo.iops = iops_report
            pinfo.iops2 = iops_log

        bw_per_th = sum(sum(pinfo.raw_bw, []), [])
        if average(bw_per_th) > 10:
            pinfo.bw = bw_log
            pinfo.bw2 = bw_report
        else:
            pinfo.bw = bw_report
            pinfo.bw2 = bw_log

        self._pinfo = pinfo

        return pinfo


class IOPerfTest(PerfTest):
    tcp_conn_timeout = 30
    max_pig_timeout = 5
    soft_runcycle = 5 * 60
    retry_time = 30

    def __init__(self, config):
        PerfTest.__init__(self, config)

        get = self.config.params.get
        do_get = self.config.params.__getitem__

        self.config_fname = do_get('cfg')

        if '/' not in self.config_fname and '.' not in self.config_fname:
            cfgs_dir = os.path.dirname(__file__)
            self.config_fname = os.path.join(cfgs_dir,
                                             self.config_fname + '.cfg')

        self.alive_check_interval = get('alive_check_interval')
        self.use_system_fio = get('use_system_fio', False)

        if get('prefill_files') is not None:
            logger.warning("prefill_files option is deprecated. Use force_prefill instead")

        self.force_prefill = get('force_prefill', False)
        self.config_params = get('params', {}).copy()

        self.io_py_remote = self.join_remote("agent.py")
        self.results_file = self.join_remote("results.json")
        self.pid_file = self.join_remote("pid")
        self.task_file = self.join_remote("task.cfg")
        self.sh_file = self.join_remote("cmd.sh")
        self.err_out_file = self.join_remote("fio_err_out")
        self.io_log_file = self.join_remote("io_log.txt")
        self.exit_code_file = self.join_remote("exit_code")

        self.max_latency = get("max_lat", None)
        self.min_bw_per_thread = get("min_bw", None)

        self.use_sudo = get("use_sudo", True)

        self.raw_cfg = open(self.config_fname).read()
        self.fio_configs = None

    @classmethod
    def load(cls, suite_name, folder):
        res = []
        for fname in os.listdir(folder):
            if re.match("\d+_params.yaml$", fname):
                num = int(fname.split('_')[0])
                res.append(load_test_results(folder, num))
        return IOTestResults(suite_name, res, folder)

    def cleanup(self):
        # delete_file(conn, self.io_py_remote)
        # need to remove temporary files used for testing
        pass

    # size is in megabytes
    def check_prefill_required(self, rossh, fname, size, num_blocks=16):
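        """
        Return True if fname still needs prefilling: it is missing, smaller
        than `size` MiB, or at least one of num_blocks randomly sampled 1 KiB
        blocks hashes to the all-zero md5 (i.e. the file looks unwritten).
        """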
        try:
            with rossh.connection.open_sftp() as sftp:
                fstats = sftp.stat(fname)

            if stat.S_ISREG(fstats.st_mode) and fstats.st_size < size * 1024 ** 2:
                return True
        except EnvironmentError:
            return True

        cmd = 'python -c "' + \
              "import sys;" + \
              "fd = open('{0}', 'rb');" + \
              "fd.seek({1});" + \
              "data = fd.read(1024); " + \
              "sys.stdout.write(data + ' ' * ( 1024 - len(data)))\" | md5sum"

        if self.use_sudo:
            cmd = "sudo " + cmd

        zero_md5 = '0f343b0931126a20f133d67c2b018a3b'
        bsize = size * (1024 ** 2)
        offsets = [random.randrange(bsize - 1024) for _ in range(num_blocks)]
        offsets.append(bsize - 1024)
        offsets.append(0)

        for offset in offsets:
            data = rossh(cmd.format(fname, offset), nolog=True)

            md = ""
            for line in data.split("\n"):
                if "unable to resolve" not in line:
                    md = line.split()[0].strip()
                    break

            if len(md) != 32:
                logger.error("File data check failed - " + data)
                return True

            if zero_md5 == md:
                return True

        return False

    def prefill_test_files(self, rossh, files, force=False):
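        """
        Sequentially write each test file to its requested size with fio
        (4 MiB blocks), skipping files that check_prefill_required() reports
        as already filled, unless force is set.
        """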
        if self.use_system_fio:
            cmd_templ = "fio "
        else:
            cmd_templ = "{0}/fio ".format(self.config.remote_dir)

        if self.use_sudo:
            cmd_templ = "sudo " + cmd_templ

        cmd_templ += "--name=xxx --filename={0} --direct=1" + \
                     " --bs=4m --size={1}m --rw=write"

        ssize = 0

        if force:
            logger.info("File prefilling is forced")

        ddtime = 0
        for fname, curr_sz in files.items():
            if not force:
                if not self.check_prefill_required(rossh, fname, curr_sz):
                    logger.debug("prefill is skipped")
                    continue

            logger.info("Prefilling file {0}".format(fname))
            cmd = cmd_templ.format(fname, curr_sz)
            ssize += curr_sz

            stime = time.time()
            rossh(cmd, timeout=curr_sz)
            ddtime += time.time() - stime

        if ddtime > 1.0:
            fill_bw = int(ssize / ddtime)
            mess = "Initial fio fill bw is {0} MiBps for this vm"
            logger.info(mess.format(fill_bw))

    def install_utils(self, node, rossh, max_retry=3, timeout=5):
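        """
        Make sure screen and fio are present on the node: install missing
        packages via yum/apt and, unless use_system_fio is set, upload and
        unpack a prebuilt fio binary matching the node's OS release/arch.
        """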
        need_install = []
        packs = [('screen', 'screen')]
        os_info = get_os(rossh)

        if self.use_system_fio:
            packs.append(('fio', 'fio'))
        else:
            packs.append(('bzip2', 'bzip2'))

        for bin_name, package in packs:
            if bin_name is None:
                need_install.append(package)
                continue

            try:
                rossh('which ' + bin_name, nolog=True)
            except OSError:
                need_install.append(package)

        if len(need_install) != 0:
            if 'redhat' == os_info.distro:
                cmd = "sudo yum -y install " + " ".join(need_install)
            else:
                cmd = "sudo apt-get -y install " + " ".join(need_install)

            for _ in range(max_retry):
                try:
                    rossh(cmd)
                    break
                except OSError as err:
                    time.sleep(timeout)
            else:
                raise OSError("Can't install - " + str(err))

        if not self.use_system_fio:
            fio_dir = os.path.dirname(os.path.dirname(wally.__file__))
            fio_dir = os.path.join(os.getcwd(), fio_dir)
            fio_dir = os.path.join(fio_dir, 'fio_binaries')
            fname = 'fio_{0.release}_{0.arch}.bz2'.format(os_info)
            fio_path = os.path.join(fio_dir, fname)

            if not os.path.exists(fio_path):
                raise RuntimeError("No prebuilt fio available for {0}".format(os_info))

            bz_dest = self.join_remote('fio.bz2')
            with node.connection.open_sftp() as sftp:
                sftp.put(fio_path, bz_dest)

            rossh("bzip2 --decompress " + bz_dest, nolog=True)
            rossh("chmod a+x " + self.join_remote("fio"), nolog=True)

    def pre_run(self):
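        """
        Compile the fio job sections from the raw config, work out the
        required size for every test file and prepare all nodes in parallel
        (create the remote dir, install utils, prefill files).
        """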
        if 'FILESIZE' not in self.config_params:
            # need to detect file size
            pass

        self.fio_configs = fio_cfg_compile(self.raw_cfg,
                                           self.config_fname,
                                           self.config_params)
        self.fio_configs = list(self.fio_configs)

        files = {}
        for section in self.fio_configs:
            sz = ssize2b(section.vals['size'])
            msz = sz / (1024 ** 2)

            if sz % (1024 ** 2) != 0:
                msz += 1

            fname = section.vals['filename']

            # if another test already uses the same file name,
            # take the largest size
            files[fname] = max(files.get(fname, 0), msz)

        with ThreadPoolExecutor(len(self.config.nodes)) as pool:
            fc = functools.partial(self.pre_run_th,
                                   files=files,
                                   force=self.force_prefill)
            list(pool.map(fc, self.config.nodes))

    def pre_run_th(self, node, files, force):
        try:
            # fill files with pseudo-random data
            rossh = run_on_node(node)
            rossh.connection = node.connection

            try:
                cmd = 'mkdir -p "{0}"'.format(self.config.remote_dir)
                if self.use_sudo:
                    cmd = "sudo " + cmd
                    cmd += " ; sudo chown {0} {1}".format(node.get_user(),
                                                          self.config.remote_dir)
                rossh(cmd, nolog=True)

                assert self.config.remote_dir != "" and self.config.remote_dir != "/"
                rossh("rm -rf {0}/*".format(self.config.remote_dir), nolog=True)

            except Exception as exc:
                msg = "Failed to create folder {0} on remote {1}. Error: {2!s}"
                msg = msg.format(self.config.remote_dir, node.get_conn_id(), exc)
                logger.exception(msg)
                raise StopTestError(msg, exc)

            self.install_utils(node, rossh)
            self.prefill_test_files(rossh, files, force)
        except:
            logger.exception("Node preparation failed")
            raise

    def show_test_execution_time(self):
        if len(self.fio_configs) > 1:
            # +10% is a rough estimation for additional operations
            # like sftp, etc
            exec_time = int(sum(map(execution_time, self.fio_configs)) * 1.1)
            exec_time_s = sec_to_str(exec_time)
            now_dt = datetime.datetime.now()
            end_dt = now_dt + datetime.timedelta(0, exec_time)
            msg = "Entire test should take around {0} and finish at {1}"
            logger.info(msg.format(exec_time_s,
                                   end_dt.strftime("%H:%M:%S")))

    def run(self):
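        """
        Execute every compiled fio section on all test nodes, retrying on SSH
        failures, store per-run params/logs in the log directory and return
        an IOTestResults suite. Sections whose latency/bandwidth already hit
        the configured limits are skipped.
        """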
        logger.debug("Run preparation")
        self.pre_run()
        self.show_test_execution_time()

        tname = os.path.basename(self.config_fname)
        if tname.endswith('.cfg'):
            tname = tname[:-4]

        barrier = Barrier(len(self.config.nodes))
        results = []

        # set of Operation_Mode_BlockSize strings which should not be tested
        # anymore, as they were already too slow with a lower thread count
        lat_bw_limit_reached = set()

        with ThreadPoolExecutor(len(self.config.nodes)) as pool:
            for pos, fio_cfg in enumerate(self.fio_configs):
                test_descr = get_test_summary(fio_cfg.vals).split("th")[0]
                if test_descr in lat_bw_limit_reached:
                    continue
                else:
                    logger.info("Will run {0} test".format(fio_cfg.name))

                templ = "Test should take about {0}." + \
                        " Should finish at {1}," + \
                        " will wait at most till {2}"
                exec_time = execution_time(fio_cfg)
                exec_time_str = sec_to_str(exec_time)
                timeout = int(exec_time + max(300, exec_time))

                now_dt = datetime.datetime.now()
                end_dt = now_dt + datetime.timedelta(0, exec_time)
                wait_till = now_dt + datetime.timedelta(0, timeout)

                logger.info(templ.format(exec_time_str,
                                         end_dt.strftime("%H:%M:%S"),
                                         wait_till.strftime("%H:%M:%S")))

                func = functools.partial(self.do_run,
                                         barrier=barrier,
                                         fio_cfg=fio_cfg,
                                         pos=pos)

                max_retr = 3
                for idx in range(max_retr):
                    try:
                        intervals = list(pool.map(func, self.config.nodes))
                        if None not in intervals:
                            break
                    except (EnvironmentError, SSHException) as exc:
                        logger.exception("During fio run")
                        if idx == max_retr - 1:
                            raise StopTestError("Fio failed", exc)

                    logger.info("Reconnecting, sleeping %ss and retrying", self.retry_time)

                    wait([pool.submit(node.connection.close)
                          for node in self.config.nodes])

                    time.sleep(self.retry_time)

                    wait([pool.submit(reconnect, node.connection, node.conn_url)
                          for node in self.config.nodes])

                fname = "{0}_task.fio".format(pos)
                with open(os.path.join(self.config.log_directory, fname), "w") as fd:
                    fd.write(str(fio_cfg))

                params = {'vm_count': len(self.config.nodes)}
                params['name'] = fio_cfg.name
                params['vals'] = dict(fio_cfg.vals.items())
                params['intervals'] = intervals
                params['nodes'] = [node.get_conn_id() for node in self.config.nodes]

                fname = "{0}_params.yaml".format(pos)
                with open(os.path.join(self.config.log_directory, fname), "w") as fd:
                    fd.write(dumps(params))

                res = load_test_results(self.config.log_directory, pos)
                results.append(res)

                if self.max_latency is not None:
                    lat_50, _ = res.get_lat_perc_50_95_multy()

                    # convert us to ms
                    if self.max_latency < lat_50:
                        logger.info(("Will skip all subsequent tests of {0} " +
                                     "due to lat/bw limits").format(fio_cfg.name))
                        lat_bw_limit_reached.add(test_descr)

                test_res = res.get_params_from_fio_report()
                if self.min_bw_per_thread is not None:
                    if self.min_bw_per_thread > average(test_res['bw']):
                        lat_bw_limit_reached.add(test_descr)

        return IOTestResults(self.config.params['cfg'],
                             results, self.config.log_directory)

    def do_run(self, node, barrier, fio_cfg, pos, nolog=False):
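        """
        Run one fio section on a single node: upload the job file and a
        wrapper shell script, execute it in the background, then download and
        unpack the produced logs/JSON. Returns the (begin, end) timestamps of
        the run, or None if fio failed to start.
        """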
        if self.use_sudo:
            sudo = "sudo "
        else:
            sudo = ""

        bash_file = """
#!/bin/bash

function get_dev() {{
    if [ -b "$1" ] ; then
        echo $1
    else
        echo $(df "$1" | tail -1 | awk '{{print $1}}')
    fi
}}

function log_io_activiti(){{
    local dest="$1"
    local dev=$(get_dev "$2")
    local sleep_time="$3"
    dev=$(basename "$dev")

    echo $dev

    for (( ; ; )) ; do
        grep -E "\\b$dev\\b" /proc/diskstats >> "$dest"
        sleep $sleep_time
    done
}}

sync
cd {exec_folder}

log_io_activiti {io_log_file} {test_file} 1 &
pid="$!"

{fio_path}fio --output-format=json --output={out_file} --alloc-size=262144 {job_file} >{err_out_file} 2>&1
echo $? >{res_code_file}
kill -9 $pid

"""

        exec_folder = self.config.remote_dir

        if self.use_system_fio:
            fio_path = ""
        else:
            if not exec_folder.endswith("/"):
                fio_path = exec_folder + "/"
            else:
                fio_path = exec_folder

        bash_file = bash_file.format(out_file=self.results_file,
                                     job_file=self.task_file,
                                     err_out_file=self.err_out_file,
                                     res_code_file=self.exit_code_file,
                                     exec_folder=exec_folder,
                                     fio_path=fio_path,
                                     test_file=self.config_params['FILENAME'],
                                     io_log_file=self.io_log_file).strip()

        with node.connection.open_sftp() as sftp:
            save_to_remote(sftp, self.task_file, str(fio_cfg))
            save_to_remote(sftp, self.sh_file, bash_file)

        exec_time = execution_time(fio_cfg)

        timeout = int(exec_time + max(300, exec_time))
        soft_tout = exec_time

        begin = time.time()

        fnames_before = run_on_node(node)("ls -1 " + exec_folder, nolog=True)

        barrier.wait()

        task = BGSSHTask(node, self.use_sudo)
        task.start(sudo + "bash " + self.sh_file)

        while True:
            try:
                task.wait(soft_tout, timeout)
                break
            except paramiko.SSHException:
                pass

            try:
                node.connection.close()
            except:
                pass

            reconnect(node.connection, node.conn_url)

        end = time.time()
        rossh = run_on_node(node)
        fnames_after = rossh("ls -1 " + exec_folder, nolog=True)

        conn_id = node.get_conn_id().replace(":", "_")
        if not nolog:
            logger.debug("Test on node {0} is finished".format(conn_id))

        log_files_pref = []
        if 'write_lat_log' in fio_cfg.vals:
            fname = fio_cfg.vals['write_lat_log']
            log_files_pref.append(fname + '_clat')
            log_files_pref.append(fname + '_lat')
            log_files_pref.append(fname + '_slat')

        if 'write_iops_log' in fio_cfg.vals:
            fname = fio_cfg.vals['write_iops_log']
            log_files_pref.append(fname + '_iops')

        if 'write_bw_log' in fio_cfg.vals:
            fname = fio_cfg.vals['write_bw_log']
            log_files_pref.append(fname + '_bw')

        files = collections.defaultdict(lambda: [])
        all_files = [os.path.basename(self.results_file)]
        new_files = set(fnames_after.split()) - set(fnames_before.split())

        for fname in new_files:
            if fname.endswith('.log') and fname.split('.')[0] in log_files_pref:
                name, _ = os.path.splitext(fname)
                if fname.count('.') == 1:
                    tp = name.split("_")[-1]
                    cnt = 0
                else:
                    tp_cnt = name.split("_")[-1]
                    tp, cnt = tp_cnt.split('.')
                files[tp].append((int(cnt), fname))
                all_files.append(fname)
            elif fname == os.path.basename(self.io_log_file):
                files['iops'].append(('sys', fname))
                all_files.append(fname)

        arch_name = self.join_remote('wally_result.tar.gz')
        tmp_dir = os.path.join(self.config.log_directory, 'tmp_' + conn_id)

        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)

        os.mkdir(tmp_dir)
        loc_arch_name = os.path.join(tmp_dir, 'wally_result.{0}.tar.gz'.format(conn_id))
        file_full_names = " ".join(all_files)

        try:
            os.unlink(loc_arch_name)
        except:
            pass

        with node.connection.open_sftp() as sftp:
            try:
                exit_code = read_from_remote(sftp, self.exit_code_file)
            except IOError:
                logger.error("No exit code file found on %s. Looks like process failed to start",
                             conn_id)
                return None

            err_out = read_from_remote(sftp, self.err_out_file)
            exit_code = exit_code.strip()

            if exit_code != '0':
                msg = "fio exited with code {0}: {1}".format(exit_code, err_out)
                logger.critical(msg.strip())
                raise StopTestError("fio failed")

            rossh("rm -f {0}".format(arch_name), nolog=True)
            pack_files_cmd = "cd {0} ; tar zcvf {1} {2}".format(exec_folder, arch_name, file_full_names)
            rossh(pack_files_cmd, nolog=True)
            sftp.get(arch_name, loc_arch_name)

        unpack_files_cmd = "cd {0} ; tar xvzf {1} >/dev/null".format(tmp_dir, loc_arch_name)
        subprocess.check_call(unpack_files_cmd, shell=True)
        os.unlink(loc_arch_name)

        for ftype, fls in files.items():
            for idx, fname in fls:
                cname = os.path.join(tmp_dir, fname)
                loc_fname = "{0}_{1}_{2}.{3}.log".format(pos, conn_id, ftype, idx)
                loc_path = os.path.join(self.config.log_directory, loc_fname)
                os.rename(cname, loc_path)

        cname = os.path.join(tmp_dir,
                             os.path.basename(self.results_file))
        loc_fname = "{0}_{1}_rawres.json".format(pos, conn_id)
        loc_path = os.path.join(self.config.log_directory, loc_fname)
        os.rename(cname, loc_path)
        os.rmdir(tmp_dir)

        remove_remote_res_files_cmd = "cd {0} ; rm -f {1} {2}".format(exec_folder,
                                                                      arch_name,
                                                                      file_full_names)
        rossh(remove_remote_res_files_cmd, nolog=True)
        return begin, end

    @classmethod
    def prepare_data(cls, results):
        """
        create a table with io performance report
        for console
        """

        def key_func(data):
            tpl = data.summary_tpl()
            return (data.name,
                    tpl.oper,
                    tpl.mode,
                    ssize2b(tpl.bsize),
                    int(tpl.th_count) * int(tpl.vm_count))
        res = []

        for item in sorted(results, key=key_func):
            test_dinfo = item.disk_perf_info()
            testnodes_count = len(item.config.nodes)

            iops, _ = test_dinfo.iops.rounded_average_conf()

            if test_dinfo.iops_sys is not None:
                iops_sys, iops_sys_conf = test_dinfo.iops_sys.rounded_average_conf()
                _, iops_sys_dev = test_dinfo.iops_sys.rounded_average_dev()
                iops_sys_per_vm = round_3_digit(iops_sys / testnodes_count)
                iops_sys = round_3_digit(iops_sys)
            else:
                iops_sys = None
                iops_sys_per_vm = None
                iops_sys_dev = None
                iops_sys_conf = None

            bw, bw_conf = test_dinfo.bw.rounded_average_conf()
            _, bw_dev = test_dinfo.bw.rounded_average_dev()
            conf_perc = int(round(bw_conf * 100 / bw))
            dev_perc = int(round(bw_dev * 100 / bw))

            lat_50 = round_3_digit(int(test_dinfo.lat_50))
            lat_95 = round_3_digit(int(test_dinfo.lat_95))
            lat_avg = round_3_digit(int(test_dinfo.lat_avg))

            iops_per_vm = round_3_digit(iops / testnodes_count)
            bw_per_vm = round_3_digit(bw / testnodes_count)

            iops = round_3_digit(iops)
            bw = round_3_digit(bw)

            summ = "{0.oper}{0.mode} {0.bsize:>4} {0.th_count:>3}th {0.vm_count:>2}vm".format(item.summary_tpl())

            res.append({"name": key_func(item)[0],
                        "key": key_func(item)[:4],
                        "summ": summ,
                        "iops": int(iops),
                        "bw": int(bw),
                        "conf": str(conf_perc),
                        "dev": str(dev_perc),
                        "iops_per_vm": int(iops_per_vm),
                        "bw_per_vm": int(bw_per_vm),
                        "lat_50": lat_50,
                        "lat_95": lat_95,
                        "lat_avg": lat_avg,

                        "iops_sys": iops_sys,
                        "iops_sys_per_vm": iops_sys_per_vm,
                        "sys_conf": iops_sys_conf,
                        "sys_dev": iops_sys_dev})

        return res

    Field = collections.namedtuple("Field", ("header", "attr", "allign", "size"))
    fiels_and_header = [
        Field("Name", "name", "l", 7),
        Field("Description", "summ", "l", 19),
        Field("IOPS\ncum", "iops", "r", 3),
        # Field("IOPS_sys\ncum", "iops_sys", "r", 3),
        Field("KiBps\ncum", "bw", "r", 6),
        Field("Cnf %\n95%", "conf", "r", 3),
        Field("Dev%", "dev", "r", 3),
        Field("iops\n/vm", "iops_per_vm", "r", 3),
        Field("KiBps\n/vm", "bw_per_vm", "r", 6),
        Field("lat ms\nmedian", "lat_50", "r", 3),
        Field("lat ms\n95%", "lat_95", "r", 3),
        Field("lat\navg", "lat_avg", "r", 3),
    ]

    fiels_and_header_dct = dict((item.attr, item) for item in fiels_and_header)

    @classmethod
    def format_for_console(cls, results):
        """
        create a table with io performance report
        for console
        """

        tab = texttable.Texttable(max_width=120)
        tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
        tab.set_cols_align([f.allign for f in cls.fiels_and_header])
        sep = ["-" * f.size for f in cls.fiels_and_header]
        tab.header([f.header for f in cls.fiels_and_header])
        prev_k = None
        for item in cls.prepare_data(results):
            if prev_k is not None:
                if prev_k != item["key"]:
                    tab.add_row(sep)

            prev_k = item["key"]
            tab.add_row([item[f.attr] for f in cls.fiels_and_header])

        return tab.draw()

    @classmethod
    def format_diff_for_console(cls, list_of_results):
        """
        create a console table comparing io performance
        of several test runs against the first one
        """

        tab = texttable.Texttable(max_width=200)
        tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)

        header = [
            cls.fiels_and_header_dct["name"].header,
            cls.fiels_and_header_dct["summ"].header,
        ]
        allign = ["l", "l"]

        header.append("IOPS ~ Cnf% ~ Dev%")
        allign.extend(["r"] * len(list_of_results))
        header.extend(
            "IOPS_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
        )

        header.append("BW")
        allign.extend(["r"] * len(list_of_results))
        header.extend(
            "BW_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
        )

        header.append("LAT")
        allign.extend(["r"] * len(list_of_results))
        header.extend(
            "LAT_{0}".format(i + 2) for i in range(len(list_of_results[1:]))
        )

        tab.header(header)
        sep = ["-" * 3] * len(header)
        processed_results = map(cls.prepare_data, list_of_results)

        key2results = []
        for res in processed_results:
            key2results.append(dict(
                ((item["name"], item["summ"]), item) for item in res
            ))

        prev_k = None
        iops_frmt = "{0[iops]} ~ {0[conf]:>2} ~ {0[dev]:>2}"
        for item in processed_results[0]:
            if prev_k is not None:
                if prev_k != item["key"]:
                    tab.add_row(sep)

            prev_k = item["key"]

            key = (item['name'], item['summ'])
            line = list(key)
            base = key2results[0][key]

            line.append(iops_frmt.format(base))

            for test_results in key2results[1:]:
                val = test_results.get(key)
                if val is None:
                    line.append("-")
                elif base['iops'] == 0:
                    line.append("Nan")
                else:
                    prc_val = {'dev': val['dev'], 'conf': val['conf']}
                    prc_val['iops'] = int(100 * val['iops'] / base['iops'])
                    line.append(iops_frmt.format(prc_val))

            line.append(base['bw'])

            for test_results in key2results[1:]:
                val = test_results.get(key)
                if val is None:
                    line.append("-")
                elif base['bw'] == 0:
                    line.append("Nan")
                else:
                    line.append(int(100 * val['bw'] / base['bw']))

            for test_results in key2results:
                val = test_results.get(key)
                if val is None:
                    line.append("-")
                else:
                    line.append("{0[lat_50]} - {0[lat_95]}".format(val))

            tab.add_row(line)

        tab.set_cols_align(allign)
        return tab.draw()