blob: f9f1d1b1cfef3dda5dd4e48bc5f6720ff17e6e8f [file] [log] [blame]
koder aka kdanilovbc2c8982015-06-13 02:50:43 +03001import re
2import time
3import json
koder aka kdanilovf236b9c2015-06-24 18:17:22 +03004import stat
koder aka kdanilov6ab4d432015-06-22 00:26:28 +03005import random
koder aka kdanilovbc2c8982015-06-13 02:50:43 +03006import os.path
7import logging
8import datetime
9import functools
10import subprocess
11import collections
12
13import yaml
14import paramiko
15import texttable
16from paramiko.ssh_exception import SSHException
17from concurrent.futures import ThreadPoolExecutor
18
koder aka kdanilov6ab4d432015-06-22 00:26:28 +030019import wally
koder aka kdanilovbc2c8982015-06-13 02:50:43 +030020from wally.pretty_yaml import dumps
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +030021from wally.statistic import round_3_digit, data_property, average
koder aka kdanilovbc2c8982015-06-13 02:50:43 +030022from wally.utils import ssize2b, sec_to_str, StopTestError, Barrier, get_os
23from wally.ssh_utils import (save_to_remote, read_from_remote, BGSSHTask, reconnect)
24
25from .fio_task_parser import (execution_time, fio_cfg_compile,
koder aka kdanilovf236b9c2015-06-24 18:17:22 +030026 get_test_summary, get_test_summary_tuple,
27 get_test_sync_mode, FioJobSection)
28
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +030029from ..itest import (TimeSeriesValue, PerfTest, TestResults,
30 run_on_node, TestConfig, MeasurementMatrix)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +030031
# module-level logger; the whole wally package logs through the "wally" logger
logger = logging.getLogger("wally")
33
34
35# Results folder structure
36# results/
37# {loadtype}_{num}/
38# config.yaml
39# ......
40
41
class NoData(object):
    """Sentinel marker: distinguishes "not computed yet" from a cached None."""
    pass
44
45
def cached_prop(func):
    """Property decorator that computes the value once and memoizes it.

    The owning class must pre-set ``self._<name>`` to the NoData sentinel;
    the first read replaces the sentinel with the computed value and every
    later read returns the stored value.
    """
    slot = "_" + func.__name__

    @property
    @functools.wraps(func)
    def getter(self):
        cached = getattr(self, slot)
        if cached is NoData:
            cached = func(self)
            setattr(self, slot, cached)
        return cached

    return getter
56
57
def load_fio_log_file(fname):
    """Parse one fio ``*_<type>.N.log`` file into a TimeSeriesValue.

    Each line starts with "<time>, <value>, ...".  The time column is
    divided by 1000 and 0.5 is added to every value to compensate for
    fio truncating logged averages to integers.
    """
    points = []
    with open(fname) as fd:
        for line in fd:
            tstamp, value = line.split(',')[:2]
            points.append((float(tstamp) / 1000,
                           float(value.strip()) + 0.5))
    return TimeSeriesValue(points)
68
69
def load_test_results(cls, folder, run_num):
    """Load one stored fio test run from *folder*.

    Reads ``<run_num>_params.yaml``, every ``<run_num>_<conn_id>_<type>.N.log``
    time-series file and every ``<run_num>_<conn_id>_rawres.json`` fio report,
    then rebuilds a result object.

    cls: result class to instantiate (e.g. IOTestResult)
    folder: directory with stored results
    run_num: sequential number of the run inside *folder*
    """
    fn = os.path.join(folder, str(run_num) + '_params.yaml')
    # NOTE(review): yaml.load is unsafe on untrusted input; these files are
    # written by wally itself, but consider yaml.safe_load anyway
    with open(fn) as fd:
        params = yaml.load(fd.read())

    res = {}
    conn_ids_set = set()
    rr = r"{0}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.\d+\.log$".format(run_num)

    for fname in os.listdir(folder):
        rm = re.match(rr, fname)
        if rm is None:
            continue

        conn_id_s = rm.group('conn_id')
        conn_id = conn_id_s.replace('_', ':')
        ftype = rm.group('type')

        if ftype not in ('iops', 'bw', 'lat'):
            continue

        ts = load_fio_log_file(os.path.join(folder, fname))
        res.setdefault(ftype, {}).setdefault(conn_id, []).append(ts)
        conn_ids_set.add(conn_id)

    # compute once before the loops below: the original defined conn_ids
    # inside the aggregation loop, which raised NameError when no log
    # files were found
    conn_ids = sorted(conn_ids_set)

    mm_res = {}
    for key, data in res.items():
        matr = [data[conn_id] for conn_id in conn_ids]
        mm_res[key] = MeasurementMatrix(matr, conn_ids)

    raw_res = {}
    for conn_id in conn_ids:
        # bug fix: previously this used conn_id_s left over from the scan
        # loop above, so every connection loaded the same rawres file
        fname = "{0}_{1}_rawres.json".format(run_num, conn_id.replace(':', '_'))
        fn = os.path.join(folder, fname)

        # fio may print a message before the json report - drop everything
        # up to the first '{'
        with open(fn) as fd:
            fc = "{" + fd.read().split('{', 1)[1]
        raw_res[conn_id] = json.loads(fc)

    fio_task = FioJobSection(params['name'])
    fio_task.vals.update(params['vals'])

    config = TestConfig('io', params, None, params['nodes'], folder, None)
    return cls(config, fio_task, mm_res, raw_res, params['intervals'])
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300128
129
class Attrmapper(object):
    """Read-only attribute view over a dict: ``mapper.foo`` -> ``dct["foo"]``.

    Missing keys surface as AttributeError, so the object behaves like a
    plain namespace built from the dict.
    """

    def __init__(self, dct):
        self._dct = dct

    def __getattr__(self, name):
        if name in self._dct:
            return self._dct[name]
        raise AttributeError(name)
139
140
class DiskPerfInfo(object):
    """Aggregated performance numbers for one fio test.

    Constructed mostly empty; the bw/iops/lat fields are filled in later
    by IOTestResult.disk_perf_info().
    """

    def __init__(self, name, summary, params, testnodes_count):
        self.name = name
        self.summary = summary
        self.params = params
        self.testnodes_count = testnodes_count

        # aggregated metrics, assigned by disk_perf_info()
        self.bw = None
        self.iops = None
        self.lat = None
        self.lat_50 = None
        self.lat_95 = None
        self.lat_avg = None

        # per-vm, per-thread raw value lists, also filled later
        self.raw_bw = []
        self.raw_iops = []
        self.raw_lat = []

        # attribute-style access to the fio job values
        self.p = Attrmapper(self.params['vals'])
        self.sync_mode = get_test_sync_mode(self.params['vals'])
        self.concurence = self.params['vals'].get('numjobs', 1)
162
163
def get_lat_perc_50_95(lat_mks):
    """Compute the 50th and 95th latency percentiles from a histogram.

    lat_mks: {latency_us: percent_of_samples}; the percents are expected
             to sum to roughly 100.

    Returns (perc_50, perc_95) in milliseconds.  A percentile that falls
    inside a bucket is linearly interpolated between the previous and
    current bucket boundaries, unless the bucket weight is below 1%
    (interpolation would be too noisy) - then the boundary itself is used.

    Note: when the histogram sums to less than 50 (95), the corresponding
    percentile stays None and the final division raises TypeError, so
    callers must pass complete histograms.
    """
    curr_perc = 0
    perc_50 = None
    perc_95 = None
    pkey = None  # previous bucket boundary
    for key, val in sorted(lat_mks.items()):
        if curr_perc + val >= 50 and perc_50 is None:
            if pkey is None or val < 1.:
                perc_50 = key
            else:
                # linear interpolation inside the (pkey, key] bucket
                perc_50 = (50. - curr_perc) / val * (key - pkey) + pkey

        if curr_perc + val >= 95:
            if pkey is None or val < 1.:
                perc_95 = key
            else:
                perc_95 = (95. - curr_perc) / val * (key - pkey) + pkey
            break

        pkey = key
        curr_perc += val

    # convert us to ms
    return perc_50 / 1000., perc_95 / 1000.
193
194
class IOTestResult(TestResults):
    """
    Fio run results (python2 code: uses basestring and list-returning map)

    config: TestConfig
    fio_task: FioJobSection
    ts_results: {str: MeasurementMatrix[TimeSeriesValue]}
    raw_result: {conn_id: parsed fio json report}
    run_interval: (float, float) - test run time, used for sensors
    """
    def __init__(self, config, fio_task, ts_results, raw_result, run_interval):

        # "<name>_<num>" -> "<name>"
        self.name = fio_task.name.rsplit("_", 1)[0]
        self.fio_task = fio_task

        # per-metric time-series matrices; absent metrics become None
        self.bw = ts_results.get('bw')
        self.lat = ts_results.get('lat')
        self.iops = ts_results.get('iops')
        # self.iops_from_lat = ts_results.get('iops_from_lat')

        # self.slat = drop_warmup(res.get('clat', None), self.params)
        # self.clat = drop_warmup(res.get('slat', None), self.params)

        res = {"bw": self.bw, "lat": self.lat, "iops": self.iops}

        self.sensors_data = None
        # lazily-filled cache for disk_perf_info()
        self._pinfo = None
        TestResults.__init__(self, config, res, raw_result, run_interval)

    def get_params_from_fio_report(self):
        """Extract per-node iops/bw figures from the fio json reports.

        Returns a dict of lists (one element per node):
        'iops'/'bw' - as reported by fio directly,
        'flt_iops'/'flt_bw' - recomputed as float totals / runtime.
        """
        nodes = self.bw.connections_ids

        iops = [self.raw_result[node]['jobs'][0]['mixed']['iops'] for node in nodes]
        total_ios = [self.raw_result[node]['jobs'][0]['mixed']['total_ios'] for node in nodes]
        # 'runtime' presumably reported in ms by fio - converted to seconds;
        # TODO(review): confirm against the fio json format in use
        runtime = [self.raw_result[node]['jobs'][0]['mixed']['runtime'] / 1000 for node in nodes]
        flt_iops = [float(ios) / rtime for ios, rtime in zip(total_ios, runtime)]

        bw = [self.raw_result[node]['jobs'][0]['mixed']['bw'] for node in nodes]
        total_bytes = [self.raw_result[node]['jobs'][0]['mixed']['io_bytes'] for node in nodes]
        flt_bw = [float(tbytes) / rtime for tbytes, rtime in zip(total_bytes, runtime)]

        return {'iops': iops,
                'flt_iops': flt_iops,
                'bw': bw,
                'flt_bw': flt_bw}

    def summary(self):
        """Short textual id of this test configuration."""
        return get_test_summary(self.fio_task, len(self.config.nodes))

    def summary_tpl(self):
        """Same information as summary(), but as a structured tuple."""
        return get_test_summary_tuple(self.fio_task, len(self.config.nodes))

    def get_yamable(self):
        # representation used when storing results as yaml
        return self.summary()

    def get_lat_perc_50_95_multy(self):
        """Merge latency histograms from all nodes/jobs and return the
        (50th, 95th) percentiles in ms (see get_lat_perc_50_95)."""
        lat_mks = collections.defaultdict(lambda: 0)
        num_res = 0

        # accumulate the latency distribution over every job on every node
        for result in self.raw_result.values():
            num_res += len(result['jobs'])
            for job_info in result['jobs']:
                # latency_ms buckets; ">=NNN" marks the open-ended last bucket
                for k, v in job_info['latency_ms'].items():
                    if isinstance(k, basestring) and k.startswith('>='):
                        lat_mks[int(k[2:]) * 1000] += v
                    else:
                        lat_mks[int(k) * 1000] += v

                # latency_us buckets are already in microseconds
                for k, v in job_info['latency_us'].items():
                    lat_mks[int(k)] += v

        # normalize the percents over the number of jobs merged
        for k, v in lat_mks.items():
            lat_mks[k] = float(v) / num_res
        return get_lat_perc_50_95(lat_mks)

    def disk_perf_info(self, avg_interval=2.0):
        """Aggregate all measurements into a DiskPerfInfo (cached).

        avg_interval: time-series are resampled to at least this interval
        (seconds) before aggregation.
        """
        if self._pinfo is not None:
            return self._pinfo

        testnodes_count = len(self.config.nodes)

        pinfo = DiskPerfInfo(self.name,
                             self.summary(),
                             self.params,
                             testnodes_count)

        # ramp_time = self.fio_task.vals.get('ramp_time', 0)

        def prepare(data, drop=1):
            # Resample each time-series to avg_interval and return plain
            # per-thread value lists, dropping the last *drop* points.
            if data is None:
                return data

            res = []
            for ts_data in data:
                # if ramp_time > 0:
                #     ts_data = ts_data.skip(ramp_time)

                if ts_data.average_interval() < avg_interval:
                    ts_data = ts_data.derived(avg_interval)

                # drop last value on bounds
                # as they may contains ranges without activities
                assert len(ts_data.values) >= drop + 1

                if drop > 0:
                    res.append(ts_data.values[:-drop])
                else:
                    res.append(ts_data.values)

            return res

        def agg_data(matr):
            # Element-wise sum of all per-thread series, truncated to the
            # shortest series length.
            arr = sum(matr, [])
            min_len = min(map(len, arr))
            res = []
            for idx in range(min_len):
                res.append(sum(dt[idx] for dt in arr))
            return res

        pinfo.raw_lat = map(prepare, self.lat.per_vm())
        num_th = sum(map(len, pinfo.raw_lat))
        # latency is averaged over threads rather than summed
        lat_avg = [val / num_th for val in agg_data(pinfo.raw_lat)]
        pinfo.lat_avg = data_property(lat_avg).average / 1000  # us to ms

        pinfo.lat_50, pinfo.lat_95 = self.get_lat_perc_50_95_multy()
        pinfo.lat = pinfo.lat_50

        pinfo.raw_bw = map(prepare, self.bw.per_vm())
        pinfo.raw_iops = map(prepare, self.iops.per_vm())

        fparams = self.get_params_from_fio_report()
        fio_report_bw = sum(fparams['flt_bw'])
        fio_report_iops = sum(fparams['flt_iops'])

        agg_bw = agg_data(pinfo.raw_bw)
        agg_iops = agg_data(pinfo.raw_iops)

        log_bw_avg = average(agg_bw)
        log_iops_avg = average(agg_iops)

        # update values to match average from fio report
        coef_iops = fio_report_iops / float(log_iops_avg)
        coef_bw = fio_report_bw / float(log_bw_avg)

        bw_log = data_property([val * coef_bw for val in agg_bw])
        iops_log = data_property([val * coef_iops for val in agg_iops])

        bw_report = data_property([fio_report_bw])
        iops_report = data_property([fio_report_iops])

        # When IOPS/BW per thread is too low
        # data from logs is rounded to match
        iops_per_th = sum(sum(pinfo.raw_iops, []), [])
        if average(iops_per_th) > 10:
            pinfo.iops = iops_log
            pinfo.iops2 = iops_report
        else:
            pinfo.iops = iops_report
            pinfo.iops2 = iops_log

        bw_per_th = sum(sum(pinfo.raw_bw, []), [])
        if average(bw_per_th) > 10:
            pinfo.bw = bw_log
            pinfo.bw2 = bw_report
        else:
            pinfo.bw = bw_report
            pinfo.bw2 = bw_log

        self._pinfo = pinfo

        return pinfo
366
367
class IOPerfTest(PerfTest):
    """Fio-based IO performance test."""
    # NOTE(review): these class constants are not referenced in the visible
    # part of this file; values presumably in seconds - confirm before use
    tcp_conn_timeout = 30
    max_pig_timeout = 5
    soft_runcycle = 5 * 60
372
373 def __init__(self, config):
374 PerfTest.__init__(self, config)
375
376 get = self.config.params.get
377 do_get = self.config.params.__getitem__
378
379 self.config_fname = do_get('cfg')
380
381 if '/' not in self.config_fname and '.' not in self.config_fname:
382 cfgs_dir = os.path.dirname(__file__)
383 self.config_fname = os.path.join(cfgs_dir,
384 self.config_fname + '.cfg')
385
386 self.alive_check_interval = get('alive_check_interval')
387 self.use_system_fio = get('use_system_fio', False)
388
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300389 if get('prefill_files') is not None:
390 logger.warning("prefill_files option is depricated. Use force_prefill instead")
391
392 self.force_prefill = get('force_prefill', False)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300393 self.config_params = get('params', {}).copy()
394
395 self.io_py_remote = self.join_remote("agent.py")
396 self.results_file = self.join_remote("results.json")
397 self.pid_file = self.join_remote("pid")
398 self.task_file = self.join_remote("task.cfg")
399 self.sh_file = self.join_remote("cmd.sh")
400 self.err_out_file = self.join_remote("fio_err_out")
401 self.exit_code_file = self.join_remote("exit_code")
402
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +0300403 self.max_latency = get("max_lat", None)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300404 self.min_bw_per_thread = get("min_bw", None)
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +0300405
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300406 self.use_sudo = get("use_sudo", True)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300407
408 self.raw_cfg = open(self.config_fname).read()
409 self.fio_configs = fio_cfg_compile(self.raw_cfg,
410 self.config_fname,
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300411 self.config_params)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300412 self.fio_configs = list(self.fio_configs)
413
414 @classmethod
415 def load(cls, folder):
416 for fname in os.listdir(folder):
417 if re.match("\d+_params.yaml$", fname):
418 num = int(fname.split('_')[0])
419 yield load_test_results(IOTestResult, folder, num)
420
    def cleanup(self):
        """Remote-side cleanup hook; currently a no-op.

        Temporary files are deliberately left on the nodes because they
        are useful for debugging test runs.
        """
        # delete_file(conn, self.io_py_remote)
        # Need to remove tempo files, used for testing
        pass
425
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300426 # size is megabytes
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300427 def check_prefill_required(self, rossh, fname, size, num_blocks=16):
koder aka kdanilov170936a2015-06-27 22:51:17 +0300428 try:
429 with rossh.connection.open_sftp() as sftp:
430 fstats = sftp.stat(fname)
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300431
koder aka kdanilov170936a2015-06-27 22:51:17 +0300432 if stat.S_ISREG(fstats.st_mode) and fstats.st_size < size * 1024 ** 2:
433 return True
434 except EnvironmentError:
koder aka kdanilovf95cfc12015-06-23 03:33:19 +0300435 return True
436
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300437 cmd = 'python -c "' + \
438 "import sys;" + \
439 "fd = open('{0}', 'rb');" + \
440 "fd.seek({1});" + \
441 "data = fd.read(1024); " + \
442 "sys.stdout.write(data + ' ' * ( 1024 - len(data)))\" | md5sum"
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300443
444 if self.use_sudo:
445 cmd = "sudo " + cmd
446
koder aka kdanilovf95cfc12015-06-23 03:33:19 +0300447 zero_md5 = '0f343b0931126a20f133d67c2b018a3b'
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300448 offsets = [random.randrange(size - 1024) for _ in range(num_blocks)]
449 offsets.append(size - 1024)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300450
koder aka kdanilovf95cfc12015-06-23 03:33:19 +0300451 for offset in offsets:
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300452 data = rossh(cmd.format(fname, offset), nolog=True)
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300453
454 md = ""
455 for line in data.split("\n"):
456 if "unable to resolve" not in line:
457 md = line.split()[0].strip()
458 break
koder aka kdanilovf95cfc12015-06-23 03:33:19 +0300459
460 if len(md) != 32:
461 logger.error("File data check is failed - " + data)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300462 return True
koder aka kdanilovf95cfc12015-06-23 03:33:19 +0300463
464 if zero_md5 == md:
465 return True
466
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300467 return False
468
469 def prefill_test_files(self, rossh, files, force=False):
470 if self.use_system_fio:
471 cmd_templ = "fio "
472 else:
473 cmd_templ = "{0}/fio ".format(self.config.remote_dir)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300474
475 if self.use_sudo:
476 cmd_templ = "sudo " + cmd_templ
477
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300478 cmd_templ += "--name=xxx --filename={0} --direct=1" + \
479 " --bs=4m --size={1}m --rw=write"
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300480
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300481 ssize = 0
482
483 if force:
484 logger.info("File prefilling is forced")
485
486 ddtime = 0
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300487 for fname, curr_sz in files.items():
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300488 if not force:
489 if not self.check_prefill_required(rossh, fname, curr_sz):
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300490 logger.debug("prefill is skipped")
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300491 continue
492
493 logger.info("Prefilling file {0}".format(fname))
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300494 cmd = cmd_templ.format(fname, curr_sz)
495 ssize += curr_sz
496
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300497 stime = time.time()
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300498 rossh(cmd, timeout=curr_sz)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300499 ddtime += time.time() - stime
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300500
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300501 if ddtime > 1.0:
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300502 fill_bw = int(ssize / ddtime)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300503 mess = "Initiall fio fill bw is {0} MiBps for this vm"
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300504 logger.info(mess.format(fill_bw))
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300505
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300506 def install_utils(self, node, rossh, max_retry=3, timeout=5):
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300507 need_install = []
508 packs = [('screen', 'screen')]
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300509 os_info = get_os(rossh)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300510
511 if self.use_system_fio:
512 packs.append(('fio', 'fio'))
513 else:
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300514 packs.append(('bzip2', 'bzip2'))
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300515
516 for bin_name, package in packs:
517 if bin_name is None:
518 need_install.append(package)
519 continue
520
521 try:
522 rossh('which ' + bin_name, nolog=True)
523 except OSError:
524 need_install.append(package)
525
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300526 if len(need_install) != 0:
527 if 'redhat' == os_info.distro:
528 cmd = "sudo yum -y install " + " ".join(need_install)
529 else:
530 cmd = "sudo apt-get -y install " + " ".join(need_install)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300531
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300532 for _ in range(max_retry):
533 try:
534 rossh(cmd)
535 break
536 except OSError as err:
537 time.sleep(timeout)
538 else:
539 raise OSError("Can't install - " + str(err))
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300540
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300541 if not self.use_system_fio:
542 fio_dir = os.path.dirname(os.path.dirname(wally.__file__))
543 fio_dir = os.path.join(os.getcwd(), fio_dir)
544 fio_dir = os.path.join(fio_dir, 'fio_binaries')
545 fname = 'fio_{0.release}_{0.arch}.bz2'.format(os_info)
546 fio_path = os.path.join(fio_dir, fname)
547
548 if not os.path.exists(fio_path):
549 raise RuntimeError("No prebuild fio available for {0}".format(os_info))
550
551 bz_dest = self.join_remote('fio.bz2')
552 with node.connection.open_sftp() as sftp:
553 sftp.put(fio_path, bz_dest)
554
555 rossh("bzip2 --decompress " + bz_dest, nolog=True)
556 rossh("chmod a+x " + self.join_remote("fio"), nolog=True)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300557
558 def pre_run(self):
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300559 files = {}
560 for section in self.fio_configs:
561 sz = ssize2b(section.vals['size'])
562 msz = sz / (1024 ** 2)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300563
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300564 if sz % (1024 ** 2) != 0:
565 msz += 1
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300566
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300567 fname = section.vals['filename']
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300568
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300569 # if already has other test with the same file name
570 # take largest size
571 files[fname] = max(files.get(fname, 0), msz)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300572
573 with ThreadPoolExecutor(len(self.config.nodes)) as pool:
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300574 fc = functools.partial(self.pre_run_th,
575 files=files,
576 force=self.force_prefill)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300577 list(pool.map(fc, self.config.nodes))
578
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300579 def pre_run_th(self, node, files, force):
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300580 # fill files with pseudo-random data
581 rossh = run_on_node(node)
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300582 rossh.connection = node.connection
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300583
584 try:
585 cmd = 'mkdir -p "{0}"'.format(self.config.remote_dir)
586 if self.use_sudo:
587 cmd = "sudo " + cmd
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300588 cmd += " ; sudo chown {0} {1}".format(node.get_user(),
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300589 self.config.remote_dir)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300590 rossh(cmd, nolog=True)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300591
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300592 assert self.config.remote_dir != "" and self.config.remote_dir != "/"
593 rossh("rm -rf {0}/*".format(self.config.remote_dir), nolog=True)
594
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300595 except Exception as exc:
596 msg = "Failed to create folder {0} on remote {1}. Error: {2!s}"
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300597 msg = msg.format(self.config.remote_dir, node.get_conn_id(), exc)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300598 logger.exception(msg)
599 raise StopTestError(msg, exc)
600
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300601 self.install_utils(node, rossh)
602 self.prefill_test_files(rossh, files, force)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300603
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300604 def show_test_execution_time(self):
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300605 if len(self.fio_configs) > 1:
606 # +10% - is a rough estimation for additional operations
607 # like sftp, etc
608 exec_time = int(sum(map(execution_time, self.fio_configs)) * 1.1)
609 exec_time_s = sec_to_str(exec_time)
610 now_dt = datetime.datetime.now()
611 end_dt = now_dt + datetime.timedelta(0, exec_time)
612 msg = "Entire test should takes aroud: {0} and finished at {1}"
613 logger.info(msg.format(exec_time_s,
614 end_dt.strftime("%H:%M:%S")))
615
    def run(self):
        """Execute every compiled fio job on all test nodes.

        Jobs run one after another; each job runs on all nodes in parallel,
        synchronized with a barrier inside do_run.  The rendered task file,
        run parameters and results are stored in self.config.log_directory
        and reloaded as IOTestResult objects.

        Returns the list of IOTestResult.
        Raises StopTestError when a job fails three times in a row.
        """
        logger.debug("Run preparation")
        self.pre_run()
        self.show_test_execution_time()

        # NOTE(review): tname is computed but never used below
        tname = os.path.basename(self.config_fname)
        if tname.endswith('.cfg'):
            tname = tname[:-4]

        barrier = Barrier(len(self.config.nodes))
        results = []

        # set of Operation_Mode_BlockSize str's
        # which should not be tested anymore, as
        # they already too slow with previous thread count
        lat_bw_limit_reached = set()

        with ThreadPoolExecutor(len(self.config.nodes)) as pool:
            for pos, fio_cfg in enumerate(self.fio_configs):
                # summary without the thread-count suffix, so the limit
                # applies to the same load at any concurrency
                test_descr = get_test_summary(fio_cfg.vals).split("th")[0]
                if test_descr in lat_bw_limit_reached:
                    continue
                else:
                    logger.info("Will run {0} test".format(fio_cfg.name))

                templ = "Test should takes about {0}." + \
                        " Should finish at {1}," + \
                        " will wait at most till {2}"
                exec_time = execution_time(fio_cfg)
                exec_time_str = sec_to_str(exec_time)
                # hard timeout: at least 300s of slack on top of the estimate
                timeout = int(exec_time + max(300, exec_time))

                now_dt = datetime.datetime.now()
                end_dt = now_dt + datetime.timedelta(0, exec_time)
                wait_till = now_dt + datetime.timedelta(0, timeout)

                logger.info(templ.format(exec_time_str,
                                         end_dt.strftime("%H:%M:%S"),
                                         wait_till.strftime("%H:%M:%S")))

                func = functools.partial(self.do_run,
                                         barrier=barrier,
                                         fio_cfg=fio_cfg,
                                         pos=pos)

                # retry the whole-job run on transient ssh/env failures
                max_retr = 3
                for idx in range(max_retr):
                    try:
                        intervals = list(pool.map(func, self.config.nodes))
                        break
                    except (EnvironmentError, SSHException) as exc:
                        logger.exception("During fio run")
                        if idx == max_retr - 1:
                            raise StopTestError("Fio failed", exc)

                        logger.info("Sleeping 30s and retrying")
                        time.sleep(30)

                # keep the rendered fio job file for later inspection
                fname = "{0}_task.fio".format(pos)
                with open(os.path.join(self.config.log_directory, fname), "w") as fd:
                    fd.write(str(fio_cfg))

                params = {'vm_count': len(self.config.nodes)}
                params['name'] = fio_cfg.name
                params['vals'] = dict(fio_cfg.vals.items())
                params['intervals'] = intervals
                params['nodes'] = [node.get_conn_id() for node in self.config.nodes]

                fname = "{0}_params.yaml".format(pos)
                with open(os.path.join(self.config.log_directory, fname), "w") as fd:
                    fd.write(dumps(params))

                # reload the stored run so in-memory results match exactly
                # what a later `load` would produce
                res = load_test_results(IOTestResult, self.config.log_directory, pos)
                results.append(res)

                if self.max_latency is not None:
                    lat_50, _ = res.get_lat_perc_50_95_multy()

                    # lat_50 is in ms here (see get_lat_perc_50_95)
                    if self.max_latency < lat_50:
                        logger.info(("Will skip all subsequent tests of {0} " +
                                     "due to lat/bw limits").format(fio_cfg.name))
                        lat_bw_limit_reached.add(test_descr)

                test_res = res.get_params_from_fio_report()
                if self.min_bw_per_thread is not None:
                    if self.min_bw_per_thread > average(test_res['bw']):
                        lat_bw_limit_reached.add(test_descr)

        return results
706
707 def do_run(self, node, barrier, fio_cfg, pos, nolog=False):
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300708 if self.use_sudo:
709 sudo = "sudo "
710 else:
711 sudo = ""
712
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300713 bash_file = "#!/bin/bash\n" + \
714 "cd {exec_folder}\n" + \
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300715 "{fio_path}fio --output-format=json --output={out_file} " + \
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300716 "--alloc-size=262144 {job_file} " + \
717 " >{err_out_file} 2>&1 \n" + \
718 "echo $? >{res_code_file}\n"
719
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300720 exec_folder = self.config.remote_dir
721
722 if self.use_system_fio:
723 fio_path = ""
724 else:
725 if not exec_folder.endswith("/"):
726 fio_path = exec_folder + "/"
727 else:
728 fio_path = exec_folder
729
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300730 bash_file = bash_file.format(out_file=self.results_file,
731 job_file=self.task_file,
732 err_out_file=self.err_out_file,
733 res_code_file=self.exit_code_file,
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300734 exec_folder=exec_folder,
735 fio_path=fio_path)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300736
737 with node.connection.open_sftp() as sftp:
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300738 save_to_remote(sftp, self.task_file, str(fio_cfg))
739 save_to_remote(sftp, self.sh_file, bash_file)
740
741 exec_time = execution_time(fio_cfg)
742
743 timeout = int(exec_time + max(300, exec_time))
744 soft_tout = exec_time
745
746 begin = time.time()
747
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300748 fnames_before = run_on_node(node)("ls -1 " + exec_folder, nolog=True)
749
750 barrier.wait()
751
koder aka kdanilov5414a992015-06-13 03:07:25 +0300752 task = BGSSHTask(node, self.use_sudo)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300753 task.start(sudo + "bash " + self.sh_file)
754
755 while True:
756 try:
757 task.wait(soft_tout, timeout)
758 break
759 except paramiko.SSHException:
760 pass
761
762 try:
763 node.connection.close()
764 except:
765 pass
766
767 reconnect(node.connection, node.conn_url)
768
769 end = time.time()
770 rossh = run_on_node(node)
771 fnames_after = rossh("ls -1 " + exec_folder, nolog=True)
772
773 conn_id = node.get_conn_id().replace(":", "_")
774 if not nolog:
775 logger.debug("Test on node {0} is finished".format(conn_id))
776
777 log_files_pref = []
778 if 'write_lat_log' in fio_cfg.vals:
779 fname = fio_cfg.vals['write_lat_log']
780 log_files_pref.append(fname + '_clat')
781 log_files_pref.append(fname + '_lat')
782 log_files_pref.append(fname + '_slat')
783
784 if 'write_iops_log' in fio_cfg.vals:
785 fname = fio_cfg.vals['write_iops_log']
786 log_files_pref.append(fname + '_iops')
787
788 if 'write_bw_log' in fio_cfg.vals:
789 fname = fio_cfg.vals['write_bw_log']
790 log_files_pref.append(fname + '_bw')
791
792 files = collections.defaultdict(lambda: [])
793 all_files = [os.path.basename(self.results_file)]
794 new_files = set(fnames_after.split()) - set(fnames_before.split())
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300795
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300796 for fname in new_files:
797 if fname.endswith('.log') and fname.split('.')[0] in log_files_pref:
798 name, _ = os.path.splitext(fname)
799 if fname.count('.') == 1:
800 tp = name.split("_")[-1]
801 cnt = 0
802 else:
803 tp_cnt = name.split("_")[-1]
804 tp, cnt = tp_cnt.split('.')
805 files[tp].append((int(cnt), fname))
806 all_files.append(fname)
807
808 arch_name = self.join_remote('wally_result.tar.gz')
809 tmp_dir = os.path.join(self.config.log_directory, 'tmp_' + conn_id)
810 os.mkdir(tmp_dir)
811 loc_arch_name = os.path.join(tmp_dir, 'wally_result.{0}.tar.gz'.format(conn_id))
812 file_full_names = " ".join(all_files)
813
814 try:
815 os.unlink(loc_arch_name)
816 except:
817 pass
818
819 with node.connection.open_sftp() as sftp:
820 exit_code = read_from_remote(sftp, self.exit_code_file)
821 err_out = read_from_remote(sftp, self.err_out_file)
822 exit_code = exit_code.strip()
823
824 if exit_code != '0':
825 msg = "fio exit with code {0}: {1}".format(exit_code, err_out)
826 logger.critical(msg.strip())
827 raise StopTestError("fio failed")
828
829 rossh("rm -f {0}".format(arch_name), nolog=True)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300830 pack_files_cmd = "cd {0} ; tar zcvf {1} {2}".format(exec_folder, arch_name, file_full_names)
831 rossh(pack_files_cmd, nolog=True)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300832 sftp.get(arch_name, loc_arch_name)
833
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300834 unpack_files_cmd = "cd {0} ; tar xvzf {1} >/dev/null".format(tmp_dir, loc_arch_name)
835 subprocess.check_call(unpack_files_cmd, shell=True)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300836 os.unlink(loc_arch_name)
837
838 for ftype, fls in files.items():
839 for idx, fname in fls:
840 cname = os.path.join(tmp_dir, fname)
841 loc_fname = "{0}_{1}_{2}.{3}.log".format(pos, conn_id, ftype, idx)
842 loc_path = os.path.join(self.config.log_directory, loc_fname)
843 os.rename(cname, loc_path)
844
845 cname = os.path.join(tmp_dir,
846 os.path.basename(self.results_file))
847 loc_fname = "{0}_{1}_rawres.json".format(pos, conn_id)
848 loc_path = os.path.join(self.config.log_directory, loc_fname)
849 os.rename(cname, loc_path)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300850 os.rmdir(tmp_dir)
koder aka kdanilov6ab4d432015-06-22 00:26:28 +0300851
852 remove_remote_res_files_cmd = "cd {0} ; rm -f {1} {2}".format(exec_folder,
853 arch_name,
854 file_full_names)
855 rossh(remove_remote_res_files_cmd, nolog=True)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300856 return begin, end
857
858 @classmethod
koder aka kdanilov6b872662015-06-23 01:58:36 +0300859 def prepare_data(cls, results):
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300860 """
861 create a table with io performance report
862 for console
863 """
864
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300865 def key_func(data):
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300866 tpl = data.summary_tpl()
koder aka kdanilov170936a2015-06-27 22:51:17 +0300867 return (data.name,
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300868 tpl.oper,
869 tpl.mode,
koder aka kdanilov170936a2015-06-27 22:51:17 +0300870 ssize2b(tpl.bsize),
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300871 int(tpl.th_count) * int(tpl.vm_count))
koder aka kdanilov6b872662015-06-23 01:58:36 +0300872 res = []
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300873
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +0300874 for item in sorted(results, key=key_func):
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +0300875 test_dinfo = item.disk_perf_info()
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300876
877 iops, _ = test_dinfo.iops.rounded_average_conf()
878
879 bw, bw_conf = test_dinfo.bw.rounded_average_conf()
880 _, bw_dev = test_dinfo.bw.rounded_average_dev()
881 conf_perc = int(round(bw_conf * 100 / bw))
882 dev_perc = int(round(bw_dev * 100 / bw))
883
koder aka kdanilov6b872662015-06-23 01:58:36 +0300884 lat_50 = round_3_digit(int(test_dinfo.lat_50))
885 lat_95 = round_3_digit(int(test_dinfo.lat_95))
koder aka kdanilov170936a2015-06-27 22:51:17 +0300886 lat_avg = round_3_digit(int(test_dinfo.lat_avg))
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300887
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +0300888 testnodes_count = len(item.config.nodes)
889 iops_per_vm = round_3_digit(iops / testnodes_count)
890 bw_per_vm = round_3_digit(bw / testnodes_count)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300891
892 iops = round_3_digit(iops)
893 bw = round_3_digit(bw)
894
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300895 summ = "{0.oper}{0.mode} {0.bsize:>4} {0.th_count:>3}th {0.vm_count:>2}vm".format(item.summary_tpl())
896
897 res.append({"name": key_func(item)[0],
898 "key": key_func(item)[:4],
899 "summ": summ,
koder aka kdanilov6b872662015-06-23 01:58:36 +0300900 "iops": int(iops),
901 "bw": int(bw),
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300902 "conf": str(conf_perc),
903 "dev": str(dev_perc),
koder aka kdanilov6b872662015-06-23 01:58:36 +0300904 "iops_per_vm": int(iops_per_vm),
905 "bw_per_vm": int(bw_per_vm),
906 "lat_50": lat_50,
koder aka kdanilov170936a2015-06-27 22:51:17 +0300907 "lat_95": lat_95,
908 "lat_avg": lat_avg})
koder aka kdanilovbc2c8982015-06-13 02:50:43 +0300909
koder aka kdanilov6b872662015-06-23 01:58:36 +0300910 return res
911
912 Field = collections.namedtuple("Field", ("header", "attr", "allign", "size"))
913 fiels_and_header = [
914 Field("Name", "name", "l", 7),
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300915 Field("Description", "summ", "l", 19),
koder aka kdanilov6b872662015-06-23 01:58:36 +0300916 Field("IOPS\ncum", "iops", "r", 3),
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300917 Field("KiBps\ncum", "bw", "r", 6),
918 Field("Cnf %\n95%", "conf", "r", 3),
919 Field("Dev%", "dev", "r", 3),
920 Field("iops\n/vm", "iops_per_vm", "r", 3),
921 Field("KiBps\n/vm", "bw_per_vm", "r", 6),
koder aka kdanilov6b872662015-06-23 01:58:36 +0300922 Field("lat ms\nmedian", "lat_50", "r", 3),
koder aka kdanilov170936a2015-06-27 22:51:17 +0300923 Field("lat ms\n95%", "lat_95", "r", 3),
924 Field("lat\navg", "lat_avg", "r", 3),
koder aka kdanilov6b872662015-06-23 01:58:36 +0300925 ]
926
927 fiels_and_header_dct = dict((item.attr, item) for item in fiels_and_header)
928
929 @classmethod
930 def format_for_console(cls, results):
931 """
932 create a table with io performance report
933 for console
934 """
935
936 tab = texttable.Texttable(max_width=120)
937 tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
938 tab.set_cols_align([f.allign for f in cls.fiels_and_header])
939 sep = ["-" * f.size for f in cls.fiels_and_header]
940 tab.header([f.header for f in cls.fiels_and_header])
941
942 prev_k = None
943 for item in cls.prepare_data(results):
koder aka kdanilov6b872662015-06-23 01:58:36 +0300944 if prev_k is not None:
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300945 if prev_k != item["key"]:
koder aka kdanilov6b872662015-06-23 01:58:36 +0300946 tab.add_row(sep)
947
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300948 prev_k = item["key"]
koder aka kdanilov6b872662015-06-23 01:58:36 +0300949 tab.add_row([item[f.attr] for f in cls.fiels_and_header])
950
951 return tab.draw()
952
953 @classmethod
954 def format_diff_for_console(cls, list_of_results):
955 """
956 create a table with io performance report
957 for console
958 """
959
960 tab = texttable.Texttable(max_width=200)
961 tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
962
963 header = [
964 cls.fiels_and_header_dct["name"].header,
965 cls.fiels_and_header_dct["summ"].header,
966 ]
967 allign = ["l", "l"]
968
969 header.append("IOPS ~ Cnf% ~ Dev%")
970 allign.extend(["r"] * len(list_of_results))
971 header.extend(
972 "IOPS_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
973 )
974
975 header.append("BW")
976 allign.extend(["r"] * len(list_of_results))
977 header.extend(
978 "BW_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
979 )
980
981 header.append("LAT")
982 allign.extend(["r"] * len(list_of_results))
983 header.extend(
984 "LAT_{0}".format(i + 2) for i in range(len(list_of_results[1:]))
985 )
986
987 tab.header(header)
988 sep = ["-" * 3] * len(header)
989 processed_results = map(cls.prepare_data, list_of_results)
990
991 key2results = []
992 for res in processed_results:
993 key2results.append(dict(
994 ((item["name"], item["summ"]), item) for item in res
995 ))
996
997 prev_k = None
koder aka kdanilovf236b9c2015-06-24 18:17:22 +0300998 iops_frmt = "{0[iops]} ~ {0[conf]:>2} ~ {0[dev]:>2}"
koder aka kdanilov6b872662015-06-23 01:58:36 +0300999 for item in processed_results[0]:
koder aka kdanilov6b872662015-06-23 01:58:36 +03001000 if prev_k is not None:
koder aka kdanilovf236b9c2015-06-24 18:17:22 +03001001 if prev_k != item["key"]:
koder aka kdanilov6b872662015-06-23 01:58:36 +03001002 tab.add_row(sep)
1003
koder aka kdanilovf236b9c2015-06-24 18:17:22 +03001004 prev_k = item["key"]
koder aka kdanilov6b872662015-06-23 01:58:36 +03001005
1006 key = (item['name'], item['summ'])
1007 line = list(key)
1008 base = key2results[0][key]
1009
1010 line.append(iops_frmt.format(base))
1011
1012 for test_results in key2results[1:]:
1013 val = test_results.get(key)
1014 if val is None:
1015 line.append("-")
1016 elif base['iops'] == 0:
1017 line.append("Nan")
1018 else:
koder aka kdanilovf236b9c2015-06-24 18:17:22 +03001019 prc_val = {'dev': val['dev'], 'conf': val['conf']}
koder aka kdanilov6b872662015-06-23 01:58:36 +03001020 prc_val['iops'] = int(100 * val['iops'] / base['iops'])
1021 line.append(iops_frmt.format(prc_val))
1022
1023 line.append(base['bw'])
1024
1025 for test_results in key2results[1:]:
1026 val = test_results.get(key)
1027 if val is None:
1028 line.append("-")
1029 elif base['bw'] == 0:
1030 line.append("Nan")
1031 else:
1032 line.append(int(100 * val['bw'] / base['bw']))
1033
1034 for test_results in key2results:
1035 val = test_results.get(key)
1036 if val is None:
1037 line.append("-")
1038 else:
1039 line.append("{0[lat_50]} - {0[lat_95]}".format(val))
1040
1041 tab.add_row(line)
1042
1043 tab.set_cols_align(allign)
koder aka kdanilovbc2c8982015-06-13 02:50:43 +03001044 return tab.draw()