# --------------------------- LEGACY --------------------------------------------------------------------------------


# # disk_info = None
# # base = None
# # linearity = None
#
#
# def group_by_name(test_data):
#     name_map = collections.defaultdict(lambda: [])
#
#     for data in test_data:
#         name_map[(data.name, data.summary())].append(data)
#
#     return name_map
#
#
# def report(name, required_fields):
#     def closure(func):
#         report_funcs.append((required_fields.split(","), name, func))
#         return func
#     return closure
#
#
# def get_test_lcheck_params(pinfo):
#     res = [{
#         's': 'sync',
#         'd': 'direct',
#         'a': 'async',
#         'x': 'sync direct'
#     }[pinfo.sync_mode]]
#
#     res.append(pinfo.p.rw)
#
#     return " ".join(res)
#
#
# def get_emb_data_svg(plt):
#     sio = StringIO()
#     plt.savefig(sio, format='svg')
#     img_start = "<!-- Created with matplotlib (http://matplotlib.org/) -->"
#     return sio.getvalue().split(img_start, 1)[1]
#
#
# def get_template(templ_name):
#     very_root_dir = os.path.dirname(os.path.dirname(wally.__file__))
#     templ_dir = os.path.join(very_root_dir, 'report_templates')
#     templ_file = os.path.join(templ_dir, templ_name)
#     return open(templ_file, 'r').read()
#
#
# def group_by(data, func):
#     if len(data) < 2:
#         yield data
#         return
#
#     ndata = [(func(dt), dt) for dt in data]
#     ndata.sort(key=lambda x: x[0])  # sort by the precomputed key, not by func(tuple)
#     pkey, dt = ndata[0]
#     curr_list = [dt]
#
#     for key, val in ndata[1:]:
#         if pkey != key:
#             yield curr_list
#             curr_list = [val]
#         else:
#             curr_list.append(val)
#         pkey = key
#
#     yield curr_list
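#
# # A minimal usage sketch for group_by (hypothetical data, added for
# # illustration): after sorting by key, consecutive items with equal keys
# # are yielded as one group:
# #
# #     >>> list(group_by([1, 1, 2, 2, 3], lambda x: x))
# #     [[1, 1], [2, 2], [3]]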
#
#
# @report('linearity', 'linearity_test')
# def linearity_report(processed_results, lab_info, comment):
#     labels_and_data_mp = collections.defaultdict(lambda: [])
#     vls = {}
#
#     # plot io_time = func(bsize)
#     for res in processed_results.values():
#         if res.name.startswith('linearity_test'):
#             iotimes = [1000. / val for val in res.iops.raw]
#
#             op_summ = get_test_summary(res.params)[:3]
#
#             labels_and_data_mp[op_summ].append(
#                 [res.p.blocksize, res.iops.raw, iotimes])
#
#             cvls = res.params.vals.copy()
#             del cvls['blocksize']
#             del cvls['rw']
#
#             cvls.pop('sync', None)
#             cvls.pop('direct', None)
#             cvls.pop('buffered', None)
#
#             if op_summ not in vls:
#                 vls[op_summ] = cvls
#             else:
#                 assert cvls == vls[op_summ]
#
#     all_labels = None
#     _, ax1 = plt.subplots()
#     for name, labels_and_data in labels_and_data_mp.items():
#         labels_and_data.sort(key=lambda x: ssize2b(x[0]))
#
#         labels, _, iotimes = zip(*labels_and_data)
#
#         if all_labels is None:
#             all_labels = labels
#         else:
#             assert all_labels == labels
#
#         plt.boxplot(iotimes)
#         if len(labels_and_data) > 2 and \
#                 ssize2b(labels_and_data[-2][0]) >= 4096:
#
#             xt = range(1, len(labels) + 1)
#
#             def io_time(sz, bw, initial_lat):
#                 return sz / bw + initial_lat
#
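#             # Note: the model assumes IO time is linear in block size -
#             # transfer time sz / bw plus a fixed setup cost initial_lat -
#             # so the least-squares fit below recovers an effective bandwidth
#             # and per-request latency from the measured averages.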
#             x = numpy.array(map(ssize2b, labels))
#             y = numpy.array([sum(dt) / len(dt) for dt in iotimes])
#             popt, _ = scipy.optimize.curve_fit(io_time, x, y, p0=(100., 1.))
#
#             y1 = io_time(x, *popt)
#             plt.plot(xt, y1, linestyle='--',
#                      label=name + ' LS linear approx')
#
#             for idx, (sz, _, _) in enumerate(labels_and_data):
#                 if ssize2b(sz) >= 4096:
#                     break
#
#             bw = (x[-1] - x[idx]) / (y[-1] - y[idx])
#             lat = y[-1] - x[-1] / bw
#             y2 = io_time(x, bw, lat)
#             plt.plot(xt, y2, linestyle='--',
#                      label=abbv_name_to_full(name) +
#                            ' (4k & max) linear approx')
#
#     plt.setp(ax1, xticklabels=labels)
#
#     plt.xlabel("Block size")
#     plt.ylabel("IO time, ms")
#
#     plt.subplots_adjust(top=0.85)
#     plt.legend(bbox_to_anchor=(0.5, 1.15),
#                loc='upper center',
#                prop={'size': 10}, ncol=2)
#     plt.grid()
#     iotime_plot = get_emb_data_svg(plt)
#     plt.clf()
#
#     # plot IOPS = func(bsize)
#     _, ax1 = plt.subplots()
#
#     for name, labels_and_data in labels_and_data_mp.items():
#         labels_and_data.sort(key=lambda x: ssize2b(x[0]))
#         _, data, _ = zip(*labels_and_data)
#         plt.boxplot(data)
#         avg = [float(sum(arr)) / len(arr) for arr in data]
#         xt = range(1, len(data) + 1)
#         plt.plot(xt, avg, linestyle='--',
#                  label=abbv_name_to_full(name) + " avg")
#
#     plt.setp(ax1, xticklabels=labels)
#     plt.xlabel("Block size")
#     plt.ylabel("IOPS")
#     plt.legend(bbox_to_anchor=(0.5, 1.15),
#                loc='upper center',
#                prop={'size': 10}, ncol=2)
#     plt.grid()
#     plt.subplots_adjust(top=0.85)
#
#     iops_plot = get_emb_data_svg(plt)
#
#     res = set(get_test_lcheck_params(res) for res in processed_results.values())
#     ncount = list(set(res.testnodes_count for res in processed_results.values()))
#     conc = list(set(res.concurence for res in processed_results.values()))
#
#     assert len(conc) == 1
#     assert len(ncount) == 1
#
#     descr = {
#         'vm_count': ncount[0],
#         'concurence': conc[0],
#         'oper_descr': ", ".join(res).capitalize()
#     }
#
#     params_map = {'iotime_vs_size': iotime_plot,
#                   'iops_vs_size': iops_plot,
#                   'descr': descr}
#
#     return get_template('report_linearity.html').format(**params_map)
#
#
# @report('lat_vs_iops', 'lat_vs_iops')
# def lat_vs_iops(processed_results, lab_info, comment):
#     lat_iops = collections.defaultdict(lambda: [])
#     requested_vs_real = collections.defaultdict(lambda: {})
#
#     for res in processed_results.values():
#         if res.name.startswith('lat_vs_iops'):
#             lat_iops[res.concurence].append((res.lat,
#                                              0,
#                                              res.iops.average,
#                                              res.iops.deviation))
#             # lat_iops[res.concurence].append((res.lat.average / 1000.0,
#             #                                  res.lat.deviation / 1000.0,
#             #                                  res.iops.average,
#             #                                  res.iops.deviation))
#             requested_iops = res.p.rate_iops * res.concurence
#             requested_vs_real[res.concurence][requested_iops] = \
#                 (res.iops.average, res.iops.deviation)
#
#     colors = ['red', 'green', 'blue', 'orange', 'magenta', "teal"]
#     colors_it = iter(colors)
#     for conc, lat_iops in sorted(lat_iops.items()):
#         lat, dev, iops, iops_dev = zip(*lat_iops)
#         plt.errorbar(iops, lat, xerr=iops_dev, yerr=dev, fmt='ro',
#                      label=str(conc) + " threads",
#                      color=next(colors_it))
#
#     plt.xlabel("IOPS")
#     plt.ylabel("Latency, ms")
#     plt.grid()
#     plt.legend(loc=0)
#     plt_iops_vs_lat = get_emb_data_svg(plt)
#     plt.clf()
#
#     colors_it = iter(colors)
#     for conc, req_vs_real in sorted(requested_vs_real.items()):
#         req, real = zip(*sorted(req_vs_real.items()))
#         iops, dev = zip(*real)
#         plt.errorbar(req, iops, yerr=dev, fmt='ro',
#                      label=str(conc) + " threads",
#                      color=next(colors_it))
#     plt.xlabel("Requested IOPS")
#     plt.ylabel("Achieved IOPS")
#     plt.grid()
#     plt.legend(loc=0)
#     plt_iops_vs_requested = get_emb_data_svg(plt)
#
#     res1 = processed_results.values()[0]
#     params_map = {'iops_vs_lat': plt_iops_vs_lat,
#                   'iops_vs_requested': plt_iops_vs_requested,
#                   'oper_descr': get_test_lcheck_params(res1).capitalize()}
#
#     return get_template('report_iops_vs_lat.html').format(**params_map)
#
#
# def render_all_html(comment, info, lab_description, images, templ_name):
#     data = info.__dict__.copy()
#     for name, val in data.items():
#         if not name.startswith('__'):
#             if val is None:
#                 if name in ('direct_iops_w64_max', 'direct_iops_w_max'):
#                     data[name] = ('-', '-', '-')
#                 else:
#                     data[name] = '-'
#             elif isinstance(val, (int, float, long)):
#                 data[name] = round_3_digit(val)
#
#     data['bw_read_max'] = (data['bw_read_max'][0] // 1024,
#                            data['bw_read_max'][1],
#                            data['bw_read_max'][2])
#
#     data['bw_write_max'] = (data['bw_write_max'][0] // 1024,
#                             data['bw_write_max'][1],
#                             data['bw_write_max'][2])
#
#     images.update(data)
#     templ = get_template(templ_name)
#     return templ.format(lab_info=lab_description,
#                         comment=comment,
#                         **images)
#
#
# def io_chart(title, concurence,
#              latv, latv_min, latv_max,
#              iops_or_bw, iops_or_bw_err,
#              legend,
#              log_iops=False,
#              log_lat=False,
#              boxplots=False,
#              latv_50=None,
#              latv_95=None,
#              error2=None):
#
#     matplotlib.rcParams.update({'font.size': 10})
#     points = " MiBps" if legend == 'BW' else ""
#     lc = len(concurence)
#     width = 0.35
#     xt = range(1, lc + 1)
#
#     op_per_vm = [v / (vm * th) for v, (vm, th) in zip(iops_or_bw, concurence)]
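#     # Note: each concurence entry is a (vm_count, threads_per_vm) pair, so
#     # op_per_vm is the total throughput normalized to a single thread; it is
#     # drawn as the dashed "/thread" line below.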
#     fig, p1 = plt.subplots()
#     xpos = [i - width / 2 for i in xt]
#
#     p1.bar(xpos, iops_or_bw,
#            width=width,
#            color='y',
#            label=legend)
#
#     err1_leg = None
#     for pos, y, err in zip(xpos, iops_or_bw, iops_or_bw_err):
#         err1_leg = p1.errorbar(pos + width / 2,
#                                y,
#                                err,
#                                color='magenta')
#
#     err2_leg = None
#     if error2 is not None:
#         for pos, y, err in zip(xpos, iops_or_bw, error2):
#             err2_leg = p1.errorbar(pos + width / 2 + 0.08,
#                                    y,
#                                    err,
#                                    lw=2,
#                                    alpha=0.5,
#                                    color='teal')
#
#     p1.grid(True)
#     p1.plot(xt, op_per_vm, '--', label=legend + "/thread", color='black')
#     handles1, labels1 = p1.get_legend_handles_labels()
#
#     handles1 += [err1_leg]
#     labels1 += ["95% conf"]
#
#     if err2_leg is not None:
#         handles1 += [err2_leg]
#         labels1 += ["95% dev"]
#
#     p2 = p1.twinx()
#
#     if latv_50 is None:
#         p2.plot(xt, latv_max, label="lat max")
#         p2.plot(xt, latv, label="lat avg")
#         p2.plot(xt, latv_min, label="lat min")
#     else:
#         p2.plot(xt, latv_50, label="lat med")
#         p2.plot(xt, latv_95, label="lat 95%")
#
#     plt.xlim(0.5, lc + 0.5)
#     plt.xticks(xt, ["{0} * {1}".format(vm, th) for (vm, th) in concurence])
#     p1.set_xlabel("VM Count * Thread per VM")
#     p1.set_ylabel(legend + points)
#     p2.set_ylabel("Latency ms")
#     plt.title(title)
#     handles2, labels2 = p2.get_legend_handles_labels()
#
#     plt.legend(handles1 + handles2, labels1 + labels2,
#                loc='center left', bbox_to_anchor=(1.1, 0.81))
#
#     if log_iops:
#         p1.set_yscale('log')
#
#     if log_lat:
#         p2.set_yscale('log')
#
#     plt.subplots_adjust(right=0.68)
#
#     return get_emb_data_svg(plt)
#
#
# def make_plots(processed_results, plots):
#     """
#     processed_results: [PerfInfo]
#     plots = [(test_name_prefix:str, fname:str, description:str)]
#     """
#     files = {}
#     for name_pref, fname, desc in plots:
#         chart_data = []
#
#         for res in processed_results:
#             summ = res.name + "_" + res.summary
#             if summ.startswith(name_pref):
#                 chart_data.append(res)
#
#         if len(chart_data) == 0:
#             raise ValueError("Can't find any data for " + name_pref)
#
#         use_bw = ssize2b(chart_data[0].p.blocksize) > 16 * 1024
#
#         chart_data.sort(key=lambda x: x.params['vals']['numjobs'])
#
#         lat = None
#         lat_min = None
#         lat_max = None
#
#         lat_50 = [x.lat_50 for x in chart_data]
#         lat_95 = [x.lat_95 for x in chart_data]
#
#         lat_diff_max = max(x.lat_95 / x.lat_50 for x in chart_data)
#         lat_log_scale = (lat_diff_max > 10)
#
#         testnodes_count = chart_data[0].testnodes_count
#         concurence = [(testnodes_count, x.concurence)
#                       for x in chart_data]
#
#         if use_bw:
#             data = [x.bw.average / 1000 for x in chart_data]
#             data_conf = [x.bw.confidence / 1000 for x in chart_data]
#             data_dev = [x.bw.deviation * 2.5 / 1000 for x in chart_data]
#             name = "BW"
#         else:
#             data = [x.iops.average for x in chart_data]
#             data_conf = [x.iops.confidence for x in chart_data]
#             data_dev = [x.iops.deviation * 2 for x in chart_data]
#             name = "IOPS"
#
#         fc = io_chart(title=desc,
#                       concurence=concurence,
#
#                       latv=lat,
#                       latv_min=lat_min,
#                       latv_max=lat_max,
#
#                       iops_or_bw=data,
#                       iops_or_bw_err=data_conf,
#
#                       legend=name,
#                       log_lat=lat_log_scale,
#
#                       latv_50=lat_50,
#                       latv_95=lat_95,
#
#                       error2=data_dev)
#         files[fname] = fc
#
#     return files
#
#
# def find_max_where(processed_results, sync_mode, blocksize, rw, iops=True):
#     result = None
#     attr = 'iops' if iops else 'bw'
#     for measurement in processed_results:
#         ok = measurement.sync_mode == sync_mode
#         ok = ok and (measurement.p.blocksize == blocksize)
#         ok = ok and (measurement.p.rw == rw)
#
#         if ok:
#             field = getattr(measurement, attr)
#
#             if result is None:
#                 result = field
#             elif field.average > result.average:
#                 result = field
#
#     return result
#
#
# def get_disk_info(processed_results):
#     di = DiskInfo()
#     di.direct_iops_w_max = find_max_where(processed_results,
#                                           'd', '4k', 'randwrite')
#     di.direct_iops_r_max = find_max_where(processed_results,
#                                           'd', '4k', 'randread')
#
#     di.direct_iops_w64_max = find_max_where(processed_results,
#                                             'd', '64k', 'randwrite')
#
#     for sz in ('16m', '64m'):
#         di.bw_write_max = find_max_where(processed_results,
#                                          'd', sz, 'randwrite', False)
#         if di.bw_write_max is not None:
#             break
#
#     if di.bw_write_max is None:
#         for sz in ('1m', '2m', '4m', '8m'):
#             di.bw_write_max = find_max_where(processed_results,
#                                              'd', sz, 'write', False)
#             if di.bw_write_max is not None:
#                 break
#
#     for sz in ('16m', '64m'):
#         di.bw_read_max = find_max_where(processed_results,
#                                         'd', sz, 'randread', False)
#         if di.bw_read_max is not None:
#             break
#
#     if di.bw_read_max is None:
#         di.bw_read_max = find_max_where(processed_results,
#                                         'd', '1m', 'read', False)
#
#     rws4k_iops_lat_th = []
#     for res in processed_results:
#         if res.sync_mode in 'xs' and res.p.blocksize == '4k':
#             if res.p.rw != 'randwrite':
#                 continue
#             rws4k_iops_lat_th.append((res.iops.average,
#                                       res.lat,
#                                       # res.lat.average,
#                                       res.concurence))
#
#     rws4k_iops_lat_th.sort(key=lambda x: x[2])
#
#     latv = [lat for _, lat, _ in rws4k_iops_lat_th]
#     for tlat in [10, 30, 100]:
#         pos = bisect.bisect_left(latv, tlat)
#         if 0 == pos:
#             setattr(di, 'rws4k_{}ms'.format(tlat), 0)
#         elif pos == len(latv):
#             iops3, _, _ = rws4k_iops_lat_th[-1]
#             iops3 = int(round_3_digit(iops3))
#             setattr(di, 'rws4k_{}ms'.format(tlat), ">=" + str(iops3))
#         else:
#             lat1 = latv[pos - 1]
#             lat2 = latv[pos]
#
#             iops1, _, th1 = rws4k_iops_lat_th[pos - 1]
#             iops2, _, th2 = rws4k_iops_lat_th[pos]
#
#             th_lat_coef = (th2 - th1) / (lat2 - lat1)
#             th3 = th_lat_coef * (tlat - lat1) + th1
#
#             th_iops_coef = (iops2 - iops1) / (th2 - th1)
#             iops3 = th_iops_coef * (th3 - th1) + iops1
#             iops3 = int(round_3_digit(iops3))
#             setattr(di, 'rws4k_{}ms'.format(tlat), iops3)
#
#     hdi = DiskInfo()
#
#     def pp(x):
#         med, conf = x.rounded_average_conf()
#         conf_perc = int(float(conf) / med * 100)
#         dev_perc = int(float(x.deviation) / med * 100)
#         return (round_3_digit(med), conf_perc, dev_perc)
#
#     hdi.direct_iops_r_max = pp(di.direct_iops_r_max)
#
#     if di.direct_iops_w_max is not None:
#         hdi.direct_iops_w_max = pp(di.direct_iops_w_max)
#     else:
#         hdi.direct_iops_w_max = None
#
#     if di.direct_iops_w64_max is not None:
#         hdi.direct_iops_w64_max = pp(di.direct_iops_w64_max)
#     else:
#         hdi.direct_iops_w64_max = None
#
#     hdi.bw_write_max = pp(di.bw_write_max)
#     hdi.bw_read_max = pp(di.bw_read_max)
#
#     hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else None
#     hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else None
#     hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else None
#     return hdi
#
#
# @report('hdd', 'hdd')
# def make_hdd_report(processed_results, lab_info, comment):
#     plots = [
#         ('hdd_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#         ('hdd_rwx4k', 'rand_write_4k', 'Random write 4k sync IOPS')
#     ]
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     images = make_plots(perf_infos, plots)
#     di = get_disk_info(perf_infos)
#     return render_all_html(comment, di, lab_info, images, "report_hdd.html")
#
#
# @report('cinder_iscsi', 'cinder_iscsi')
# def make_cinder_iscsi_report(processed_results, lab_info, comment):
#     plots = [
#         ('cinder_iscsi_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#         ('cinder_iscsi_rwx4k', 'rand_write_4k', 'Random write 4k sync IOPS')
#     ]
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     try:
#         images = make_plots(perf_infos, plots)
#     except ValueError:
#         plots = [
#             ('cinder_iscsi_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#             ('cinder_iscsi_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS')
#         ]
#         images = make_plots(perf_infos, plots)
#     di = get_disk_info(perf_infos)
#
#     return render_all_html(comment, di, lab_info, images, "report_cinder_iscsi.html")
#
#
# @report('ceph', 'ceph')
# def make_ceph_report(processed_results, lab_info, comment):
#     plots = [
#         ('ceph_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#         ('ceph_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS'),
#         ('ceph_rrd16m', 'rand_read_16m', 'Random read 16m direct MiBps'),
#         ('ceph_rwd16m', 'rand_write_16m',
#          'Random write 16m direct MiBps'),
#     ]
#
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     images = make_plots(perf_infos, plots)
#     di = get_disk_info(perf_infos)
#     return render_all_html(comment, di, lab_info, images, "report_ceph.html")
#
#
# @report('mixed', 'mixed')
# def make_mixed_report(processed_results, lab_info, comment):
#     #
#     # IOPS(X% read) = 100 / ( X / IOPS_R + (100 - X) / IOPS_W )
#     #
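#     # A worked example of the formula (hypothetical numbers, for
#     # illustration): with IOPS_R = 10000 and IOPS_W = 2000, a 70% read mix
#     # gives 100 / (70 / 10000 + 30 / 2000) = 100 / 0.022 ~= 4545 mixed IOPS.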
#
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     mixed = collections.defaultdict(lambda: [])
#
#     is_ssd = False
#     for res in perf_infos:
#         if res.name.startswith('mixed'):
#             if res.name.startswith('mixed-ssd'):
#                 is_ssd = True
#             mixed[res.concurence].append((res.p.rwmixread,
#                                           res.lat,
#                                           0,
#                                           # res.lat.average / 1000.0,
#                                           # res.lat.deviation / 1000.0,
#                                           res.iops.average,
#                                           res.iops.deviation))
#
#     if len(mixed) == 0:
#         raise ValueError("No mixed load found")
#
#     fig, p1 = plt.subplots()
#     p2 = p1.twinx()
#
#     colors = ['red', 'green', 'blue', 'orange', 'magenta', "teal"]
#     colors_it = iter(colors)
#     for conc, mix_lat_iops in sorted(mixed.items()):
#         mix_lat_iops = sorted(mix_lat_iops)
#         read_perc, lat, dev, iops, iops_dev = zip(*mix_lat_iops)
#         p1.errorbar(read_perc, iops, color=next(colors_it),
#                     yerr=iops_dev, label=str(conc) + " th")
#
#         p2.errorbar(read_perc, lat, color=next(colors_it),
#                     ls='--', yerr=dev, label=str(conc) + " th lat")
#
#     if is_ssd:
#         p1.set_yscale('log')
#         p2.set_yscale('log')
#
#     p1.set_xlim(-5, 105)
#
#     read_perc = set(read_perc)
#     read_perc.add(0)
#     read_perc.add(100)
#     read_perc = sorted(read_perc)
#
#     plt.xticks(read_perc, map(str, read_perc))
#
#     p1.grid(True)
#     p1.set_xlabel("% of reads")
#     p1.set_ylabel("Mixed IOPS")
#     p2.set_ylabel("Latency, ms")
#
#     handles1, labels1 = p1.get_legend_handles_labels()
#     handles2, labels2 = p2.get_legend_handles_labels()
#     plt.subplots_adjust(top=0.85)
#     plt.legend(handles1 + handles2, labels1 + labels2,
#                bbox_to_anchor=(0.5, 1.15),
#                loc='upper center',
#                prop={'size': 12}, ncol=3)
#     plt.show()
#
#
# def make_load_report(idx, results_dir, fname):
#     dpath = os.path.join(results_dir, "io_" + str(idx))
#     files = sorted(os.listdir(dpath))
#     gf = lambda x: "_".join(x.rsplit(".", 1)[0].split('_')[:3])
#
#     for key, group in itertools.groupby(files, gf):
#         fname = os.path.join(dpath, key + ".fio")
#
#         cfgs = list(parse_all_in_1(open(fname).read(), fname))
#
#         fname = os.path.join(dpath, key + "_lat.log")
#
#         curr = []
#         arrays = []
#
#         with open(fname) as fd:
#             for offset, lat, _, _ in csv.reader(fd):
#                 offset = int(offset)
#                 lat = int(lat)
#                 if len(curr) > 0 and curr[-1][0] > offset:
#                     arrays.append(curr)
#                     curr = []
#                 curr.append((offset, lat))
#             arrays.append(curr)
#         conc = int(cfgs[0].vals.get('numjobs', 1))
#
#         if conc != 5:
#             continue
#
#         assert len(arrays) == len(cfgs) * conc
#
#         garrays = [[(0, 0)] for _ in range(conc)]
#
#         for offset in range(len(cfgs)):
#             for acc, new_arr in zip(garrays, arrays[offset * conc:(offset + 1) * conc]):
#                 last = acc[-1][0]
#                 for off, lat in new_arr:
#                     acc.append((off / 1000. + last, lat / 1000.))
#
#         for cfg, arr in zip(cfgs, garrays):
#             plt.plot(*zip(*arr[1:]))
#         plt.show()
#         exit(1)
#
#
# def make_io_report(dinfo, comment, path, lab_info=None):
#     lab_info = {
#         "total_disk": "None",
#         "total_memory": "None",
#         "nodes_count": "None",
#         "processor_count": "None"
#     }
#
#     try:
#         res_fields = sorted(v.name for v in dinfo)
#
#         found = False
#         for fields, name, func in report_funcs:
#             for field in fields:
#                 pos = bisect.bisect_left(res_fields, field)
#
#                 if pos == len(res_fields):
#                     break
#
#                 if not res_fields[pos].startswith(field):
#                     break
#             else:
#                 found = True
#                 hpath = path.format(name)
#
#                 try:
#                     report = func(dinfo, lab_info, comment)
#                 except:
#                     logger.exception("During {0} report generation".format(name))
#                     continue
#
#                 if report is not None:
#                     try:
#                         with open(hpath, "w") as fd:
#                             fd.write(report)
#                     except:
#                         logger.exception("While saving {0} report".format(name))
#                         continue
#                     logger.info("Report {0} saved into {1}".format(name, hpath))
#                 else:
#                     logger.warning("No report produced by {0!r}".format(name))
#
#         if not found:
#             logger.warning("No report generator found for this load")
#
#     except Exception as exc:
#         import traceback
#         traceback.print_exc()
#         logger.error("Failed to generate html report: " + str(exc))
#
#
# # @classmethod
# # def prepare_data(cls, results) -> List[Dict[str, Any]]:
# #     """create a table with io performance report for console"""
# #
# #     def key_func(data: FioRunResult) -> Tuple[str, str, str, str, int]:
# #         tpl = data.summary_tpl()
# #         return (data.name,
# #                 tpl.oper,
# #                 tpl.mode,
# #                 ssize2b(tpl.bsize),
# #                 int(tpl.th_count) * int(tpl.vm_count))
# #     res = []
# #
# #     for item in sorted(results, key=key_func):
# #         test_dinfo = item.disk_perf_info()
# #         testnodes_count = len(item.config.nodes)
# #
# #         iops, _ = test_dinfo.iops.rounded_average_conf()
# #
# #         if test_dinfo.iops_sys is not None:
# #             iops_sys, iops_sys_conf = test_dinfo.iops_sys.rounded_average_conf()
# #             _, iops_sys_dev = test_dinfo.iops_sys.rounded_average_dev()
# #             iops_sys_per_vm = round_3_digit(iops_sys / testnodes_count)
# #             iops_sys = round_3_digit(iops_sys)
# #         else:
# #             iops_sys = None
# #             iops_sys_per_vm = None
# #             iops_sys_dev = None
# #             iops_sys_conf = None
# #
# #         bw, bw_conf = test_dinfo.bw.rounded_average_conf()
# #         _, bw_dev = test_dinfo.bw.rounded_average_dev()
# #         conf_perc = int(round(bw_conf * 100 / bw))
# #         dev_perc = int(round(bw_dev * 100 / bw))
# #
# #         lat_50 = round_3_digit(int(test_dinfo.lat_50))
# #         lat_95 = round_3_digit(int(test_dinfo.lat_95))
# #         lat_avg = round_3_digit(int(test_dinfo.lat_avg))
# #
# #         iops_per_vm = round_3_digit(iops / testnodes_count)
# #         bw_per_vm = round_3_digit(bw / testnodes_count)
# #
# #         iops = round_3_digit(iops)
# #         bw = round_3_digit(bw)
# #
# #         summ = "{0.oper}{0.mode} {0.bsize:>4} {0.th_count:>3}th {0.vm_count:>2}vm".format(item.summary_tpl())
# #
# #         res.append({"name": key_func(item)[0],
# #                     "key": key_func(item)[:4],
# #                     "summ": summ,
# #                     "iops": int(iops),
# #                     "bw": int(bw),
# #                     "conf": str(conf_perc),
# #                     "dev": str(dev_perc),
# #                     "iops_per_vm": int(iops_per_vm),
# #                     "bw_per_vm": int(bw_per_vm),
# #                     "lat_50": lat_50,
# #                     "lat_95": lat_95,
# #                     "lat_avg": lat_avg,
# #
# #                     "iops_sys": iops_sys,
# #                     "iops_sys_per_vm": iops_sys_per_vm,
# #                     "sys_conf": iops_sys_conf,
# #                     "sys_dev": iops_sys_dev})
# #
# #     return res
# #
# # Field = collections.namedtuple("Field", ("header", "attr", "allign", "size"))
# # fiels_and_header = [
# #     Field("Name", "name", "l", 7),
# #     Field("Description", "summ", "l", 19),
# #     Field("IOPS\ncum", "iops", "r", 3),
# #     # Field("IOPS_sys\ncum", "iops_sys", "r", 3),
# #     Field("KiBps\ncum", "bw", "r", 6),
# #     Field("Cnf %\n95%", "conf", "r", 3),
# #     Field("Dev%", "dev", "r", 3),
# #     Field("iops\n/vm", "iops_per_vm", "r", 3),
# #     Field("KiBps\n/vm", "bw_per_vm", "r", 6),
# #     Field("lat ms\nmedian", "lat_50", "r", 3),
# #     Field("lat ms\n95%", "lat_95", "r", 3),
# #     Field("lat\navg", "lat_avg", "r", 3),
# # ]
# #
# # fiels_and_header_dct = dict((item.attr, item) for item in fiels_and_header)
# #
# # @classmethod
# # def format_for_console(cls, results) -> str:
# #     """create a table with io performance report for console"""
# #
# #     tab = texttable.Texttable(max_width=120)
# #     tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
# #     tab.set_cols_align([f.allign for f in cls.fiels_and_header])
# #     sep = ["-" * f.size for f in cls.fiels_and_header]
# #     tab.header([f.header for f in cls.fiels_and_header])
# #     prev_k = None
# #     for item in cls.prepare_data(results):
# #         if prev_k is not None:
# #             if prev_k != item["key"]:
# #                 tab.add_row(sep)
# #
# #         prev_k = item["key"]
# #         tab.add_row([item[f.attr] for f in cls.fiels_and_header])
# #
# #     return tab.draw()
# #
# # @classmethod
# # def format_diff_for_console(cls, list_of_results: List[Any]) -> str:
# #     """create a table with io performance report for console"""
# #
# #     tab = texttable.Texttable(max_width=200)
# #     tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
# #
# #     header = [
# #         cls.fiels_and_header_dct["name"].header,
# #         cls.fiels_and_header_dct["summ"].header,
# #     ]
# #     allign = ["l", "l"]
# #
# #     header.append("IOPS ~ Cnf% ~ Dev%")
# #     allign.extend(["r"] * len(list_of_results))
# #     header.extend(
# #         "IOPS_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
# #     )
# #
# #     header.append("BW")
# #     allign.extend(["r"] * len(list_of_results))
# #     header.extend(
# #         "BW_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
# #     )
# #
# #     header.append("LAT")
# #     allign.extend(["r"] * len(list_of_results))
# #     header.extend(
# #         "LAT_{0}".format(i + 2) for i in range(len(list_of_results[1:]))
# #     )
# #
# #     tab.header(header)
# #     sep = ["-" * 3] * len(header)
# #     processed_results = map(cls.prepare_data, list_of_results)
# #
# #     key2results = []
# #     for res in processed_results:
# #         key2results.append(dict(
# #             ((item["name"], item["summ"]), item) for item in res
# #         ))
# #
# #     prev_k = None
# #     iops_frmt = "{0[iops]} ~ {0[conf]:>2} ~ {0[dev]:>2}"
# #     for item in processed_results[0]:
# #         if prev_k is not None:
# #             if prev_k != item["key"]:
# #                 tab.add_row(sep)
# #
# #         prev_k = item["key"]
# #
# #         key = (item['name'], item['summ'])
# #         line = list(key)
# #         base = key2results[0][key]
# #
# #         line.append(iops_frmt.format(base))
# #
# #         for test_results in key2results[1:]:
# #             val = test_results.get(key)
# #             if val is None:
# #                 line.append("-")
# #             elif base['iops'] == 0:
# #                 line.append("Nan")
# #             else:
# #                 prc_val = {'dev': val['dev'], 'conf': val['conf']}
# #                 prc_val['iops'] = int(100 * val['iops'] / base['iops'])
# #                 line.append(iops_frmt.format(prc_val))
# #
# #         line.append(base['bw'])
# #
# #         for test_results in key2results[1:]:
# #             val = test_results.get(key)
# #             if val is None:
# #                 line.append("-")
# #             elif base['bw'] == 0:
# #                 line.append("Nan")
# #             else:
# #                 line.append(int(100 * val['bw'] / base['bw']))
# #
# #         for test_results in key2results:
# #             val = test_results.get(key)
# #             if val is None:
# #                 line.append("-")
# #             else:
# #                 line.append("{0[lat_50]} - {0[lat_95]}".format(val))
# #
# #         tab.add_row(line)
# #
# #     tab.set_cols_align(allign)
# #     return tab.draw()
#
#
#
# # READ_IOPS_DISCSTAT_POS = 3
# # WRITE_IOPS_DISCSTAT_POS = 7
# #
# #
# # def load_sys_log_file(ftype: str, fname: str) -> TimeSeriesValue:
# #     assert ftype == 'iops'
# #     pval = None
# #     with open(fname) as fd:
# #         iops = []
# #         for ln in fd:
# #             params = ln.split()
# #             cval = int(params[WRITE_IOPS_DISCSTAT_POS]) + \
# #                 int(params[READ_IOPS_DISCSTAT_POS])
# #             if pval is not None:
# #                 iops.append(cval - pval)
# #             pval = cval
# #
# #     vals = [(idx * 1000, val) for idx, val in enumerate(iops)]
# #     return TimeSeriesValue(vals)
# #
# #
# # def load_test_results(folder: str, run_num: int) -> 'FioRunResult':
# #     res = {}
# #     params = None
# #
# #     fn = os.path.join(folder, str(run_num) + '_params.yaml')
# #     params = yaml.load(open(fn).read())
# #
# #     conn_ids_set = set()
# #     rr = r"{}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.\d+\.log$".format(run_num)
# #     for fname in os.listdir(folder):
# #         rm = re.match(rr, fname)
# #         if rm is None:
# #             continue
# #
# #         conn_id_s = rm.group('conn_id')
# #         conn_id = conn_id_s.replace('_', ':')
# #         ftype = rm.group('type')
# #
# #         if ftype not in ('iops', 'bw', 'lat'):
# #             continue
# #
# #         ts = load_fio_log_file(os.path.join(folder, fname))
# #         res.setdefault(ftype, {}).setdefault(conn_id, []).append(ts)
# #
# #         conn_ids_set.add(conn_id)
# #
# #     rr = r"{}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.sys\.log$".format(run_num)
# #     for fname in os.listdir(folder):
# #         rm = re.match(rr, fname)
# #         if rm is None:
# #             continue
# #
# #         conn_id_s = rm.group('conn_id')
# #         conn_id = conn_id_s.replace('_', ':')
# #         ftype = rm.group('type')
# #
# #         if ftype not in ('iops', 'bw', 'lat'):
# #             continue
# #
# #         ts = load_sys_log_file(ftype, os.path.join(folder, fname))
# #         res.setdefault(ftype + ":sys", {}).setdefault(conn_id, []).append(ts)
# #
# #         conn_ids_set.add(conn_id)
# #
# #     mm_res = {}
# #
# #     if len(res) == 0:
# #         raise ValueError("No data was found")
# #
# #     for key, data in res.items():
# #         conn_ids = sorted(conn_ids_set)
# #         awail_ids = [conn_id for conn_id in conn_ids if conn_id in data]
# #         matr = [data[conn_id] for conn_id in awail_ids]
# #         mm_res[key] = MeasurementMatrix(matr, awail_ids)
# #
# #     raw_res = {}
# #     for conn_id in conn_ids:
# #         fn = os.path.join(folder, "{0}_{1}_rawres.json".format(run_num, conn_id_s))
# #
# #         # remove message hack
# #         fc = "{" + open(fn).read().split('{', 1)[1]
# #         raw_res[conn_id] = json.loads(fc)
# #
# #     fio_task = FioJobSection(params['name'])
# #     fio_task.vals.update(params['vals'])
# #
# #     config = TestConfig('io', params, None, params['nodes'], folder, None)
# #     return FioRunResult(config, fio_task, mm_res, raw_res, params['intervals'], run_num)
# #
#
# # class DiskPerfInfo:
# #     def __init__(self, name: str, summary: str, params: Dict[str, Any], testnodes_count: int) -> None:
# #         self.name = name
# #         self.bw = None
# #         self.iops = None
# #         self.lat = None
# #         self.lat_50 = None
# #         self.lat_95 = None
# #         self.lat_avg = None
# #
# #         self.raw_bw = []
# #         self.raw_iops = []
# #         self.raw_lat = []
# #
# #         self.params = params
# #         self.testnodes_count = testnodes_count
# #         self.summary = summary
# #
# #         self.sync_mode = get_test_sync_mode(self.params['vals'])
# #         self.concurence = self.params['vals'].get('numjobs', 1)
# #
# #
# # class IOTestResults:
# #     def __init__(self, suite_name: str, fio_results: 'FioRunResult', log_directory: str):
# #         self.suite_name = suite_name
# #         self.fio_results = fio_results
# #         self.log_directory = log_directory
# #
# #     def __iter__(self):
# #         return iter(self.fio_results)
# #
# #     def __len__(self):
# #         return len(self.fio_results)
# #
# #     def get_yamable(self) -> Dict[str, List[str]]:
# #         items = [(fio_res.summary(), fio_res.idx) for fio_res in self]
# #         return {self.suite_name: [self.log_directory] + items}
#
#
# # class FioRunResult(TestResults):
# #     """
# #     Fio run results
# #     config: TestConfig
# #     fio_task: FioJobSection
# #     ts_results: {str: MeasurementMatrix[TimeSeriesValue]}
# #     raw_result: ????
# #     run_interval: (float, float) - test run time, used for sensors
# #     """
# #     def __init__(self, config, fio_task, ts_results, raw_result, run_interval, idx):
# #
# #         self.name = fio_task.name.rsplit("_", 1)[0]
# #         self.fio_task = fio_task
# #         self.idx = idx
# #
# #         self.bw = ts_results['bw']
# #         self.lat = ts_results['lat']
# #         self.iops = ts_results['iops']
# #
# #         if 'iops:sys' in ts_results:
# #             self.iops_sys = ts_results['iops:sys']
# #         else:
# #             self.iops_sys = None
# #
# #         res = {"bw": self.bw,
# #                "lat": self.lat,
# #                "iops": self.iops,
# #                "iops:sys": self.iops_sys}
# #
# #         self.sensors_data = None
# #         self._pinfo = None
# #         TestResults.__init__(self, config, res, raw_result, run_interval)
# #
# #     def get_params_from_fio_report(self):
# #         nodes = self.bw.connections_ids
# #
# #         iops = [self.raw_result[node]['jobs'][0]['mixed']['iops'] for node in nodes]
# #         total_ios = [self.raw_result[node]['jobs'][0]['mixed']['total_ios'] for node in nodes]
# #         runtime = [self.raw_result[node]['jobs'][0]['mixed']['runtime'] / 1000 for node in nodes]
# #         flt_iops = [float(ios) / rtime for ios, rtime in zip(total_ios, runtime)]
# #
# #         bw = [self.raw_result[node]['jobs'][0]['mixed']['bw'] for node in nodes]
# #         total_bytes = [self.raw_result[node]['jobs'][0]['mixed']['io_bytes'] for node in nodes]
# #         flt_bw = [float(tbytes) / rtime for tbytes, rtime in zip(total_bytes, runtime)]
# #
# #         return {'iops': iops,
# #                 'flt_iops': flt_iops,
# #                 'bw': bw,
# #                 'flt_bw': flt_bw}
# #
# #     def summary(self):
# #         return get_test_summary(self.fio_task, len(self.config.nodes))
# #
# #     def summary_tpl(self):
# #         return get_test_summary_tuple(self.fio_task, len(self.config.nodes))
# #
# #     def get_lat_perc_50_95_multy(self):
# #         lat_mks = collections.defaultdict(lambda: 0)
# #         num_res = 0
# #
# #         for result in self.raw_result.values():
# #             num_res += len(result['jobs'])
# #             for job_info in result['jobs']:
# #                 for k, v in job_info['latency_ms'].items():
# #                     if isinstance(k, basestring) and k.startswith('>='):
# #                         lat_mks[int(k[2:]) * 1000] += v
# #                     else:
# #                         lat_mks[int(k) * 1000] += v
# #
# #                 for k, v in job_info['latency_us'].items():
# #                     lat_mks[int(k)] += v
# #
# #         for k, v in lat_mks.items():
# #             lat_mks[k] = float(v) / num_res
# #         return get_lat_perc_50_95(lat_mks)
# #
# #     def disk_perf_info(self, avg_interval=2.0):
# #
# #         if self._pinfo is not None:
# #             return self._pinfo
# #
# #         testnodes_count = len(self.config.nodes)
# #
# #         pinfo = DiskPerfInfo(self.name,
# #                              self.summary(),
# #                              self.params,
# #                              testnodes_count)
# #
# #         def prepare(data, drop=1):
# #             if data is None:
# #                 return data
# #
# #             res = []
# #             for ts_data in data:
# #                 if ts_data.average_interval() < avg_interval:
# #                     ts_data = ts_data.derived(avg_interval)
# #
# #                 # drop the last values at the bounds, as they may
# #                 # contain ranges without activity
# #                 assert len(ts_data.values) >= drop + 1, str(drop) + " " + str(ts_data.values)
# #
# #                 if drop > 0:
# #                     res.append(ts_data.values[:-drop])
# #                 else:
# #                     res.append(ts_data.values)
# #
# #             return res
# #
# #         def agg_data(matr):
# #             arr = sum(matr, [])
# #             min_len = min(map(len, arr))
# #             res = []
# #             for idx in range(min_len):
# #                 res.append(sum(dt[idx] for dt in arr))
# #             return res
# #
# #         pinfo.raw_lat = map(prepare, self.lat.per_vm())
# #         num_th = sum(map(len, pinfo.raw_lat))
# #         lat_avg = [val / num_th for val in agg_data(pinfo.raw_lat)]
# #         pinfo.lat_avg = data_property(lat_avg).average / 1000  # us to ms
# #
# #         pinfo.lat_50, pinfo.lat_95 = self.get_lat_perc_50_95_multy()
# #         pinfo.lat = pinfo.lat_50
# #
# #         pinfo.raw_bw = map(prepare, self.bw.per_vm())
# #         pinfo.raw_iops = map(prepare, self.iops.per_vm())
# #
# #         if self.iops_sys is not None:
# #             pinfo.raw_iops_sys = map(prepare, self.iops_sys.per_vm())
# #             pinfo.iops_sys = data_property(agg_data(pinfo.raw_iops_sys))
# #         else:
# #             pinfo.raw_iops_sys = None
# #             pinfo.iops_sys = None
# #
# #         fparams = self.get_params_from_fio_report()
# #         fio_report_bw = sum(fparams['flt_bw'])
# #         fio_report_iops = sum(fparams['flt_iops'])
# #
# #         agg_bw = agg_data(pinfo.raw_bw)
# #         agg_iops = agg_data(pinfo.raw_iops)
# #
# #         log_bw_avg = average(agg_bw)
# #         log_iops_avg = average(agg_iops)
# #
# #         # update values to match the averages from the fio report
# #         coef_iops = fio_report_iops / float(log_iops_avg)
# #         coef_bw = fio_report_bw / float(log_bw_avg)
# #
# #         bw_log = data_property([val * coef_bw for val in agg_bw])
# #         iops_log = data_property([val * coef_iops for val in agg_iops])
# #
# #         bw_report = data_property([fio_report_bw])
# #         iops_report = data_property([fio_report_iops])
# #
# #         # when IOPS/BW per thread is too low, the per-thread log data
# #         # is heavily rounded, so prefer the totals from the fio report
# #         iops_per_th = sum(sum(pinfo.raw_iops, []), [])
# #         if average(iops_per_th) > 10:
# #             pinfo.iops = iops_log
# #             pinfo.iops2 = iops_report
# #         else:
# #             pinfo.iops = iops_report
# #             pinfo.iops2 = iops_log
# #
# #         bw_per_th = sum(sum(pinfo.raw_bw, []), [])
# #         if average(bw_per_th) > 10:
# #             pinfo.bw = bw_log
# #             pinfo.bw2 = bw_report
# #         else:
# #             pinfo.bw = bw_report
# #             pinfo.bw2 = bw_log
# #
# #         self._pinfo = pinfo
# #
# #         return pinfo
#
# # class TestResult:
# #     """Hold all information for a given test - test info,
# #     sensors data and performance results for the test period from all nodes"""
# #     run_id = None  # type: int
# #     test_info = None  # type: Any
# #     begin_time = None  # type: int
# #     end_time = None  # type: int
# #     sensors = None  # Dict[Tuple[str, str, str], TimeSeries]
# #     performance = None  # Dict[Tuple[str, str], TimeSeries]
# #
# # class TestResults:
# #     """
# #     This class describes test results
# #
# #     config: TestConfig - test config object
# #     params: dict - parameters from the yaml file for this test
# #     results: {str: MeasurementMesh} - test results object
# #     raw_result: Any - opaque object to store raw results
# #     run_interval: (float, float) - test run time, used for sensors
# #     """
# #
# #     def __init__(self,
# #                  config: TestConfig,
# #                  results: Dict[str, Any],
# #                  raw_result: Any,
# #                  run_interval: Tuple[float, float]) -> None:
# #         self.config = config
# #         self.params = config.params
# #         self.results = results
# #         self.raw_result = raw_result
# #         self.run_interval = run_interval
# #
# #     def __str__(self) -> str:
# #         res = "{0}({1}):\n    results:\n".format(
# #             self.__class__.__name__,
# #             self.summary())
# #
# #         for name, val in self.results.items():
# #             res += "        {0}={1}\n".format(name, val)
# #
# #         res += "    params:\n"
# #
# #         for name, val in self.params.items():
# #             res += "        {0}={1}\n".format(name, val)
# #
# #         return res
# #
# #     def summary(self) -> str:
# #         raise NotImplementedError()
# #         return ""
# #
# #     def get_yamable(self) -> Any:
# #         raise NotImplementedError()
# #         return None
#
#
#
# # class MeasurementMatrix:
# #     """
# #     data: [[MeasurementResult]] - VM_COUNT x TH_COUNT matrix of MeasurementResult
# #     """
# #     def __init__(self, data, connections_ids):
# #         self.data = data
# #         self.connections_ids = connections_ids
# #
# #     def per_vm(self):
# #         return self.data
# #
# #     def per_th(self):
# #         return sum(self.data, [])
#
#
# # class MeasurementResults:
# #     data = None  # type: List[Any]
# #
# #     def stat(self) -> StatProps:
# #         return data_property(self.data)
# #
# #     def __str__(self) -> str:
# #         return 'TS([' + ", ".join(map(str, self.data)) + '])'
# #
# #
# # class SimpleVals(MeasurementResults):
# #     """
# #     data: [float] - list of values
# #     """
# #     def __init__(self, data: List[float]) -> None:
# #         self.data = data
# #
# #
# # class TimeSeriesValue(MeasurementResults):
# #     """
# #     data: [(float, float, float)] - list of (start_time, length, average_value_for_interval)
# #     odata: original values
# #     """
# #     def __init__(self, data: List[Tuple[float, float]]) -> None:
# #         assert len(data) > 0
# #         self.odata = data[:]
# #         self.data = []  # type: List[Tuple[float, float, float]]
# #
# #         cstart = 0.0
# #         for nstart, nval in data:
# #             self.data.append((cstart, nstart - cstart, nval))
# #             cstart = nstart
# #
# #     @property
# #     def values(self) -> List[float]:
# #         return [val[2] for val in self.data]
# #
# #     def average_interval(self) -> float:
# #         return float(sum([val[1] for val in self.data])) / len(self.data)
# #
# #     def skip(self, seconds) -> 'TimeSeriesValue':
# #         nres = []
# #         for start, ln, val in self.data:
# #             nstart = start + ln - seconds
# #             if nstart > 0:
# #                 nres.append([nstart, val])
# #         return self.__class__(nres)
# #
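# #     # Note on derived() below: it resamples the series onto a uniform grid
# #     # of width tdelta; each source interval contributes to every bin it
# #     # overlaps, weighted by the overlap length, i.e. a time-weighted
# #     # average.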
1385# # end = self.data[-1][0] + self.data[-1][1]
1386# # tdelta = float(tdelta)
1387# #
1388# # ln = end / tdelta
1389# #
1390# # if ln - int(ln) > 0:
1391# # ln += 1
1392# #
1393# # res = [[tdelta * i, 0.0] for i in range(int(ln))]
1394# #
1395# # for start, lenght, val in self.data:
1396# # start_idx = int(start / tdelta)
1397# # end_idx = int((start + lenght) / tdelta)
1398# #
1399# # for idx in range(start_idx, end_idx + 1):
1400# # rstart = tdelta * idx
1401# # rend = tdelta * (idx + 1)
1402# #
1403# # intersection_ln = min(rend, start + lenght) - max(start, rstart)
1404# # if intersection_ln > 0:
1405# # try:
1406# # res[idx][1] += val * intersection_ln / tdelta
1407# # except IndexError:
1408# # raise
1409# #
1410# # return self.__class__(res)
#
#
# def console_report_stage(ctx: TestRun) -> None:
#     # TODO(koder): load data from storage
#     raise NotImplementedError("...")
#     # first_report = True
#     # text_rep_fname = ctx.config.text_report_file
#     #
#     # with open(text_rep_fname, "w") as fd:
#     #     for tp, data in ctx.results.items():
#     #         if 'io' == tp and data is not None:
#     #             rep_lst = []
#     #             for result in data:
#     #                 rep_lst.append(
#     #                     IOPerfTest.format_for_console(list(result)))
#     #             rep = "\n\n".join(rep_lst)
#     #         elif tp in ['mysql', 'pgbench'] and data is not None:
#     #             rep = MysqlTest.format_for_console(data)
#     #         elif tp == 'omg':
#     #             rep = OmgTest.format_for_console(data)
#     #         else:
#     #             logger.warning("Can't generate text report for " + tp)
#     #             continue
#     #
#     #         fd.write(rep)
#     #         fd.write("\n")
#     #
#     #         if first_report:
#     #             logger.info("Text report was stored in " + text_rep_fname)
#     #             first_report = False
#     #
#     #         print("\n" + rep + "\n")
#
#
# # def test_load_report_stage(cfg: Config, ctx: TestRun) -> None:
# #     load_rep_fname = cfg.load_report_file
# #     found = False
# #     for idx, (tp, data) in enumerate(ctx.results.items()):
# #         if 'io' == tp and data is not None:
# #             if found:
# #                 logger.error("Making reports for more than one " +
# #                              "io block isn't supported! All " +
# #                              "reports, except the first, are skipped")
# #                 continue
# #             found = True
# #             report.make_load_report(idx, cfg['results'], load_rep_fname)
# #
# #
#
# # def html_report_stage(ctx: TestRun) -> None:
# #     # TODO(koder): load data from storage
# #     raise NotImplementedError("...")
# #     html_rep_fname = cfg.html_report_file
# #     found = False
# #     for tp, data in ctx.results.items():
# #         if 'io' == tp and data is not None:
# #             if found or len(data) > 1:
# #                 logger.error("Making reports for more than one " +
# #                              "io block isn't supported! All " +
# #                              "reports, except the first, are skipped")
# #                 continue
# #             found = True
# #             report.make_io_report(list(data[0]),
# #                                   cfg.get('comment', ''),
# #                                   html_rep_fname,
# #                                   lab_info=ctx.nodes)
#
# #
# # def load_data_from_path(test_res_dir: str) -> Mapping[str, List[Any]]:
# #     files = get_test_files(test_res_dir)
# #     raw_res = yaml_load(open(files['raw_results']).read())
# #     res = collections.defaultdict(list)
# #
# #     for tp, test_lists in raw_res:
# #         for tests in test_lists:
# #             for suite_name, suite_data in tests.items():
# #                 result_folder = suite_data[0]
# #                 res[tp].append(TOOL_TYPE_MAPPER[tp].load(suite_name, result_folder))
# #
# #     return res
# #
# #
# # def load_data_from_path_stage(var_dir: str, _, ctx: TestRun) -> None:
# #     for tp, vals in load_data_from_path(var_dir).items():
# #         ctx.results.setdefault(tp, []).extend(vals)
# #
# #
# # def load_data_from(var_dir: str) -> Callable[[TestRun], None]:
# #     return functools.partial(load_data_from_path_stage, var_dir)