import abc
import logging
from typing import Dict, Any, Iterator, Tuple, cast, List

import numpy
import scipy
import matplotlib

# has to run before the pyplot import to avoid a tkinter (the default graph frontend) import error
matplotlib.use('svg')

import matplotlib.pyplot as plt

from .utils import ssize2b
from .stage import Stage, StepOrder
from .test_run_class import TestRun
from .result_classes import NormStatProps


logger = logging.getLogger("wally")


class ConsoleReportStage(Stage):

    priority = StepOrder.REPORT

    def run(self, ctx: TestRun) -> None:
        # TODO(koder): load data from storage
        raise NotImplementedError("...")


class HtmlReportStage(Stage):

    priority = StepOrder.REPORT

    def run(self, ctx: TestRun) -> None:
        # TODO(koder): load data from storage
        raise NotImplementedError("...")


# TODO: needs to be revised; use StatProps fields instead
class StoragePerfSummary:
    def __init__(self, name: str) -> None:
        self.direct_iops_r_max = 0  # type: int
        self.direct_iops_w_max = 0  # type: int

        # 64k is used instead of 4k to feed caches faster
        self.direct_iops_w64_max = 0  # type: int

        self.rws4k_10ms = 0  # type: int
        self.rws4k_30ms = 0  # type: int
        self.rws4k_100ms = 0  # type: int
        self.bw_write_max = 0  # type: int
        self.bw_read_max = 0  # type: int

        self.bw = None  # type: float
        self.iops = None  # type: float
        self.lat = None  # type: float
        self.lat_50 = None  # type: float
        self.lat_95 = None  # type: float


class HTMLBlock:
    data = None  # type: str
    js_links = []  # type: List[str]
    css_links = []  # type: List[str]


class Reporter(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def get_divs(self, config, storage) -> Iterator[Tuple[str, str, HTMLBlock]]:
        pass

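
# A minimal illustrative sketch (not part of the real pipeline) of how a
# concrete Reporter could implement get_divs(): it yields (section, title,
# HTMLBlock) tuples for the HTML report to assemble into divs. The class
# below and the strings it yields are hypothetical examples.
class ExampleStaticBlock(Reporter):
    """Hypothetical reporter yielding a single pre-rendered HTML block"""
    def get_divs(self, config, storage) -> Iterator[Tuple[str, str, HTMLBlock]]:
        block = HTMLBlock()
        block.data = "<p>example content</p>"
        yield ('summary', 'Example block', block)
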

# Main performance report
class PerformanceSummary(Reporter):
    """Main performance summary report"""


# IOPS/latency vs queue depth
class IOPS_QD(Reporter):
    """Creates graphs showing how IOPS and latency depend on queue depth"""


# Linearization report
class IOPS_Bsize(Reporter):
    """Creates graphs showing how IOPS and latency depend on block size"""


# IOPS/latency distribution
class IOPSHist(Reporter):
    """IOPS/latency distribution histogram"""


# IOPS/latency over test time
class IOPSTime(Reporter):
    """IOPS/latency during test"""
    def get_divs(self, config, storage) -> Iterator[Tuple[str, str, HTMLBlock]]:
        pass


# Cluster load over test time
class ClusterLoad(Reporter):
    """Cluster load during the test"""


# Node load over test time
class NodeLoad(Reporter):
    """Node load during the test"""


# Ceph cluster summary
class CephClusterSummary(Reporter):
    """Ceph cluster summary"""


# TODO: Resource consumption report
# TODO: Ceph operation breakout report
# TODO: Resource consumption for different types of test


#
# # disk_info = None
# # base = None
# # linearity = None
#
#
# def group_by_name(test_data):
#     name_map = collections.defaultdict(lambda: [])
#
#     for data in test_data:
#         name_map[(data.name, data.summary())].append(data)
#
#     return name_map
#
#
# def report(name, required_fields):
#     def closure(func):
#         report_funcs.append((required_fields.split(","), name, func))
#         return func
#     return closure
#
#
# def get_test_lcheck_params(pinfo):
#     res = [{
#         's': 'sync',
#         'd': 'direct',
#         'a': 'async',
#         'x': 'sync direct'
#     }[pinfo.sync_mode]]
#
#     res.append(pinfo.p.rw)
#
#     return " ".join(res)
#
#
# def get_emb_data_svg(plt):
#     sio = StringIO()
#     plt.savefig(sio, format='svg')
#     img_start = "<!-- Created with matplotlib (http://matplotlib.org/) -->"
#     return sio.getvalue().split(img_start, 1)[1]
#
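# Illustrative use (a sketch; assumes a figure was already drawn on the
# current pyplot state). The split() above drops the XML prolog and the
# creation comment, so the returned markup can be inlined into HTML:
#
#     plt.plot(range(10))
#     inline_svg = get_emb_data_svg(plt)   # "<svg ...> ... </svg>"
#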
#
# def get_template(templ_name):
#     very_root_dir = os.path.dirname(os.path.dirname(wally.__file__))
#     templ_dir = os.path.join(very_root_dir, 'report_templates')
#     templ_file = os.path.join(templ_dir, templ_name)
#     return open(templ_file, 'r').read()
#
#
# def group_by(data, func):
#     if len(data) < 2:
#         yield data
#         return
#
#     ndata = [(func(dt), dt) for dt in data]
#     ndata.sort(key=lambda x: x[0])
#     pkey, dt = ndata[0]
#     curr_list = [dt]
#
#     for key, val in ndata[1:]:
#         if pkey != key:
#             yield curr_list
#             curr_list = [val]
#         else:
#             curr_list.append(val)
#         pkey = key
#
#     yield curr_list
#
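# Illustrative behaviour (a sketch): group_by([3, 1, 2, 1], func=lambda x: x)
# yields [1, 1], [2] and [3] - after sorting by the computed key, each run of
# equal keys comes out as one list.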
#
#
# @report('linearity', 'linearity_test')
# def linearity_report(processed_results, lab_info, comment):
#     labels_and_data_mp = collections.defaultdict(lambda: [])
#     vls = {}
#
#     # plot io_time = func(bsize)
#     for res in processed_results.values():
#         if res.name.startswith('linearity_test'):
#             iotimes = [1000. / val for val in res.iops.raw]
#
#             op_summ = get_test_summary(res.params)[:3]
#
#             labels_and_data_mp[op_summ].append(
#                 [res.p.blocksize, res.iops.raw, iotimes])
#
#             cvls = res.params.vals.copy()
#             del cvls['blocksize']
#             del cvls['rw']
#
#             cvls.pop('sync', None)
#             cvls.pop('direct', None)
#             cvls.pop('buffered', None)
#
#             if op_summ not in vls:
#                 vls[op_summ] = cvls
#             else:
#                 assert cvls == vls[op_summ]
#
#     all_labels = None
#     _, ax1 = plt.subplots()
#     for name, labels_and_data in labels_and_data_mp.items():
#         labels_and_data.sort(key=lambda x: ssize2b(x[0]))
#
#         labels, _, iotimes = zip(*labels_and_data)
#
#         if all_labels is None:
#             all_labels = labels
#         else:
#             assert all_labels == labels
#
#         plt.boxplot(iotimes)
#         if len(labels_and_data) > 2 and \
#                 ssize2b(labels_and_data[-2][0]) >= 4096:
#
#             xt = range(1, len(labels) + 1)
#
#             def io_time(sz, bw, initial_lat):
#                 return sz / bw + initial_lat
#
#             x = numpy.array(map(ssize2b, labels))
#             y = numpy.array([sum(dt) / len(dt) for dt in iotimes])
#             popt, _ = scipy.optimize.curve_fit(io_time, x, y, p0=(100., 1.))
#
#             y1 = io_time(x, *popt)
#             plt.plot(xt, y1, linestyle='--',
#                      label=name + ' LS linear approx')
#
#             for idx, (sz, _, _) in enumerate(labels_and_data):
#                 if ssize2b(sz) >= 4096:
#                     break
#
#             bw = (x[-1] - x[idx]) / (y[-1] - y[idx])
#             lat = y[-1] - x[-1] / bw
#             y2 = io_time(x, bw, lat)
#             plt.plot(xt, y2, linestyle='--',
#                      label=abbv_name_to_full(name) +
#                            ' (4k & max) linear approx')
#
#     plt.setp(ax1, xticklabels=labels)
#
#     plt.xlabel("Block size")
#     plt.ylabel("IO time, ms")
#
#     plt.subplots_adjust(top=0.85)
#     plt.legend(bbox_to_anchor=(0.5, 1.15),
#                loc='upper center',
#                prop={'size': 10}, ncol=2)
#     plt.grid()
#     iotime_plot = get_emb_data_svg(plt)
#     plt.clf()
#
#     # plot IOPS = func(bsize)
#     _, ax1 = plt.subplots()
#
#     for name, labels_and_data in labels_and_data_mp.items():
#         labels_and_data.sort(key=lambda x: ssize2b(x[0]))
#         _, data, _ = zip(*labels_and_data)
#         plt.boxplot(data)
#         avg = [float(sum(arr)) / len(arr) for arr in data]
#         xt = range(1, len(data) + 1)
#         plt.plot(xt, avg, linestyle='--',
#                  label=abbv_name_to_full(name) + " avg")
#
#     plt.setp(ax1, xticklabels=labels)
#     plt.xlabel("Block size")
#     plt.ylabel("IOPS")
#     plt.legend(bbox_to_anchor=(0.5, 1.15),
#                loc='upper center',
#                prop={'size': 10}, ncol=2)
#     plt.grid()
#     plt.subplots_adjust(top=0.85)
#
#     iops_plot = get_emb_data_svg(plt)
#
#     res = set(get_test_lcheck_params(res) for res in processed_results.values())
#     ncount = list(set(res.testnodes_count for res in processed_results.values()))
#     conc = list(set(res.concurence for res in processed_results.values()))
#
#     assert len(conc) == 1
#     assert len(ncount) == 1
#
#     descr = {
#         'vm_count': ncount[0],
#         'concurence': conc[0],
#         'oper_descr': ", ".join(res).capitalize()
#     }
#
#     params_map = {'iotime_vs_size': iotime_plot,
#                   'iops_vs_size': iops_plot,
#                   'descr': descr}
#
#     return get_template('report_linearity.html').format(**params_map)
#
#
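# A note on the io_time model fitted above (illustrative numbers): io_time(sz)
# = sz / bw + initial_lat is linear in block size, so the least-squares fit
# recovers an effective bandwidth and a fixed per-request latency. E.g. with
# bw = 512 bytes/ms and initial_lat = 2 ms, a 4096-byte request predicts
# 4096 / 512 + 2 = 10 ms of IO time.
#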
# @report('lat_vs_iops', 'lat_vs_iops')
# def lat_vs_iops(processed_results, lab_info, comment):
#     lat_iops = collections.defaultdict(lambda: [])
#     requested_vs_real = collections.defaultdict(lambda: {})
#
#     for res in processed_results.values():
#         if res.name.startswith('lat_vs_iops'):
#             lat_iops[res.concurence].append((res.lat,
#                                              0,
#                                              res.iops.average,
#                                              res.iops.deviation))
#             # lat_iops[res.concurence].append((res.lat.average / 1000.0,
#             #                                  res.lat.deviation / 1000.0,
#             #                                  res.iops.average,
#             #                                  res.iops.deviation))
#             requested_iops = res.p.rate_iops * res.concurence
#             requested_vs_real[res.concurence][requested_iops] = \
#                 (res.iops.average, res.iops.deviation)
#
#     colors = ['red', 'green', 'blue', 'orange', 'magenta', "teal"]
#     colors_it = iter(colors)
#     for conc, lat_iops in sorted(lat_iops.items()):
#         lat, dev, iops, iops_dev = zip(*lat_iops)
#         plt.errorbar(iops, lat, xerr=iops_dev, yerr=dev, fmt='ro',
#                      label=str(conc) + " threads",
#                      color=next(colors_it))
#
#     plt.xlabel("IOPS")
#     plt.ylabel("Latency, ms")
#     plt.grid()
#     plt.legend(loc=0)
#     plt_iops_vs_lat = get_emb_data_svg(plt)
#     plt.clf()
#
#     colors_it = iter(colors)
#     for conc, req_vs_real in sorted(requested_vs_real.items()):
#         req, real = zip(*sorted(req_vs_real.items()))
#         iops, dev = zip(*real)
#         plt.errorbar(req, iops, yerr=dev, fmt='ro',
#                      label=str(conc) + " threads",
#                      color=next(colors_it))
#     plt.xlabel("Requested IOPS")
#     plt.ylabel("Achieved IOPS")
#     plt.grid()
#     plt.legend(loc=0)
#     plt_iops_vs_requested = get_emb_data_svg(plt)
#
#     res1 = processed_results.values()[0]
#     params_map = {'iops_vs_lat': plt_iops_vs_lat,
#                   'iops_vs_requested': plt_iops_vs_requested,
#                   'oper_descr': get_test_lcheck_params(res1).capitalize()}
#
#     return get_template('report_iops_vs_lat.html').format(**params_map)
#
#
# def render_all_html(comment, info, lab_description, images, templ_name):
#     data = info.__dict__.copy()
#     for name, val in data.items():
#         if not name.startswith('__'):
#             if val is None:
#                 if name in ('direct_iops_w64_max', 'direct_iops_w_max'):
#                     data[name] = ('-', '-', '-')
#                 else:
#                     data[name] = '-'
#             elif isinstance(val, (int, float, long)):
#                 data[name] = round_3_digit(val)
#
#     data['bw_read_max'] = (data['bw_read_max'][0] // 1024,
#                            data['bw_read_max'][1],
#                            data['bw_read_max'][2])
#
#     data['bw_write_max'] = (data['bw_write_max'][0] // 1024,
#                             data['bw_write_max'][1],
#                             data['bw_write_max'][2])
#
#     images.update(data)
#     templ = get_template(templ_name)
#     return templ.format(lab_info=lab_description,
#                         comment=comment,
#                         **images)
#
#
# def io_chart(title, concurence,
#              latv, latv_min, latv_max,
#              iops_or_bw, iops_or_bw_err,
#              legend,
#              log_iops=False,
#              log_lat=False,
#              boxplots=False,
#              latv_50=None,
#              latv_95=None,
#              error2=None):
#
#     matplotlib.rcParams.update({'font.size': 10})
#     points = " MiBps" if legend == 'BW' else ""
#     lc = len(concurence)
#     width = 0.35
#     xt = range(1, lc + 1)
#
#     op_per_vm = [v / (vm * th) for v, (vm, th) in zip(iops_or_bw, concurence)]
#     fig, p1 = plt.subplots()
#     xpos = [i - width / 2 for i in xt]
#
#     p1.bar(xpos, iops_or_bw,
#            width=width,
#            color='y',
#            label=legend)
#
#     err1_leg = None
#     for pos, y, err in zip(xpos, iops_or_bw, iops_or_bw_err):
#         err1_leg = p1.errorbar(pos + width / 2,
#                                y,
#                                err,
#                                color='magenta')
#
#     err2_leg = None
#     if error2 is not None:
#         for pos, y, err in zip(xpos, iops_or_bw, error2):
#             err2_leg = p1.errorbar(pos + width / 2 + 0.08,
#                                    y,
#                                    err,
#                                    lw=2,
#                                    alpha=0.5,
#                                    color='teal')
#
#     p1.grid(True)
#     p1.plot(xt, op_per_vm, '--', label=legend + "/thread", color='black')
#     handles1, labels1 = p1.get_legend_handles_labels()
#
#     handles1 += [err1_leg]
#     labels1 += ["95% conf"]
#
#     if err2_leg is not None:
#         handles1 += [err2_leg]
#         labels1 += ["95% dev"]
#
#     p2 = p1.twinx()
#
#     if latv_50 is None:
#         p2.plot(xt, latv_max, label="lat max")
#         p2.plot(xt, latv, label="lat avg")
#         p2.plot(xt, latv_min, label="lat min")
#     else:
#         p2.plot(xt, latv_50, label="lat med")
#         p2.plot(xt, latv_95, label="lat 95%")
#
#     plt.xlim(0.5, lc + 0.5)
#     plt.xticks(xt, ["{0} * {1}".format(vm, th) for (vm, th) in concurence])
#     p1.set_xlabel("VM Count * Thread per VM")
#     p1.set_ylabel(legend + points)
#     p2.set_ylabel("Latency ms")
#     plt.title(title)
#     handles2, labels2 = p2.get_legend_handles_labels()
#
#     plt.legend(handles1 + handles2, labels1 + labels2,
#                loc='center left', bbox_to_anchor=(1.1, 0.81))
#
#     if log_iops:
#         p1.set_yscale('log')
#
#     if log_lat:
#         p2.set_yscale('log')
#
#     plt.subplots_adjust(right=0.68)
#
#     return get_emb_data_svg(plt)
#
#
# def make_plots(processed_results, plots):
#     """
#     processed_results: [PerfInfo]
#     plots = [(test_name_prefix:str, fname:str, description:str)]
#     """
#     files = {}
#     for name_pref, fname, desc in plots:
#         chart_data = []
#
#         for res in processed_results:
#             summ = res.name + "_" + res.summary
#             if summ.startswith(name_pref):
#                 chart_data.append(res)
#
#         if len(chart_data) == 0:
#             raise ValueError("Can't find any data for " + name_pref)
#
#         use_bw = ssize2b(chart_data[0].p.blocksize) > 16 * 1024
#
#         chart_data.sort(key=lambda x: x.params['vals']['numjobs'])
#
#         lat = None
#         lat_min = None
#         lat_max = None
#
#         lat_50 = [x.lat_50 for x in chart_data]
#         lat_95 = [x.lat_95 for x in chart_data]
#
#         lat_diff_max = max(x.lat_95 / x.lat_50 for x in chart_data)
#         lat_log_scale = (lat_diff_max > 10)
#
#         testnodes_count = chart_data[0].testnodes_count
#         concurence = [(testnodes_count, x.concurence)
#                       for x in chart_data]
#
#         if use_bw:
#             data = [x.bw.average / 1000 for x in chart_data]
#             data_conf = [x.bw.confidence / 1000 for x in chart_data]
#             data_dev = [x.bw.deviation * 2.5 / 1000 for x in chart_data]
#             name = "BW"
#         else:
#             data = [x.iops.average for x in chart_data]
#             data_conf = [x.iops.confidence for x in chart_data]
#             data_dev = [x.iops.deviation * 2 for x in chart_data]
#             name = "IOPS"
#
#         fc = io_chart(title=desc,
#                       concurence=concurence,
#
#                       latv=lat,
#                       latv_min=lat_min,
#                       latv_max=lat_max,
#
#                       iops_or_bw=data,
#                       iops_or_bw_err=data_conf,
#
#                       legend=name,
#                       log_lat=lat_log_scale,
#
#                       latv_50=lat_50,
#                       latv_95=lat_95,
#
#                       error2=data_dev)
#         files[fname] = fc
#
#     return files
#
#
# def find_max_where(processed_results, sync_mode, blocksize, rw, iops=True):
#     result = None
#     attr = 'iops' if iops else 'bw'
#     for measurement in processed_results:
#         ok = measurement.sync_mode == sync_mode
#         ok = ok and (measurement.p.blocksize == blocksize)
#         ok = ok and (measurement.p.rw == rw)
#
#         if ok:
#             field = getattr(measurement, attr)
#
#             if result is None:
#                 result = field
#             elif field.average > result.average:
#                 result = field
#
#     return result
#
#
# def get_disk_info(processed_results):
#     di = DiskInfo()
#     di.direct_iops_w_max = find_max_where(processed_results,
#                                           'd', '4k', 'randwrite')
#     di.direct_iops_r_max = find_max_where(processed_results,
#                                           'd', '4k', 'randread')
#
#     di.direct_iops_w64_max = find_max_where(processed_results,
#                                             'd', '64k', 'randwrite')
#
#     for sz in ('16m', '64m'):
#         di.bw_write_max = find_max_where(processed_results,
#                                          'd', sz, 'randwrite', False)
#         if di.bw_write_max is not None:
#             break
#
#     if di.bw_write_max is None:
#         for sz in ('1m', '2m', '4m', '8m'):
#             di.bw_write_max = find_max_where(processed_results,
#                                              'd', sz, 'write', False)
#             if di.bw_write_max is not None:
#                 break
#
#     for sz in ('16m', '64m'):
#         di.bw_read_max = find_max_where(processed_results,
#                                         'd', sz, 'randread', False)
#         if di.bw_read_max is not None:
#             break
#
#     if di.bw_read_max is None:
#         di.bw_read_max = find_max_where(processed_results,
#                                         'd', '1m', 'read', False)
#
#     rws4k_iops_lat_th = []
#     for res in processed_results:
#         if res.sync_mode in 'xs' and res.p.blocksize == '4k':
#             if res.p.rw != 'randwrite':
#                 continue
#             rws4k_iops_lat_th.append((res.iops.average,
#                                       res.lat,
#                                       # res.lat.average,
#                                       res.concurence))
#
#     rws4k_iops_lat_th.sort(key=lambda x: x[2])
#
#     latv = [lat for _, lat, _ in rws4k_iops_lat_th]
#
#     for tlat in [10, 30, 100]:
#         pos = bisect.bisect_left(latv, tlat)
#         if 0 == pos:
#             setattr(di, 'rws4k_{}ms'.format(tlat), 0)
#         elif pos == len(latv):
#             iops3, _, _ = rws4k_iops_lat_th[-1]
#             iops3 = int(round_3_digit(iops3))
#             setattr(di, 'rws4k_{}ms'.format(tlat), ">=" + str(iops3))
#         else:
#             lat1 = latv[pos - 1]
#             lat2 = latv[pos]
#
#             iops1, _, th1 = rws4k_iops_lat_th[pos - 1]
#             iops2, _, th2 = rws4k_iops_lat_th[pos]
#
#             th_lat_coef = (th2 - th1) / (lat2 - lat1)
#             th3 = th_lat_coef * (tlat - lat1) + th1
#
#             th_iops_coef = (iops2 - iops1) / (th2 - th1)
#             iops3 = th_iops_coef * (th3 - th1) + iops1
#             iops3 = int(round_3_digit(iops3))
#             setattr(di, 'rws4k_{}ms'.format(tlat), iops3)
#
#     hdi = DiskInfo()
#
#     def pp(x):
#         med, conf = x.rounded_average_conf()
#         conf_perc = int(float(conf) / med * 100)
#         dev_perc = int(float(x.deviation) / med * 100)
#         return (round_3_digit(med), conf_perc, dev_perc)
#
#     hdi.direct_iops_r_max = pp(di.direct_iops_r_max)
#
#     if di.direct_iops_w_max is not None:
#         hdi.direct_iops_w_max = pp(di.direct_iops_w_max)
#     else:
#         hdi.direct_iops_w_max = None
#
#     if di.direct_iops_w64_max is not None:
#         hdi.direct_iops_w64_max = pp(di.direct_iops_w64_max)
#     else:
#         hdi.direct_iops_w64_max = None
#
#     hdi.bw_write_max = pp(di.bw_write_max)
#     hdi.bw_read_max = pp(di.bw_read_max)
#
#     hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else None
#     hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else None
#     hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else None
#     return hdi
#
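# Worked example for the latency interpolation in get_disk_info above
# (illustrative numbers): given measured (iops, lat, threads) points
# (1000, 8, 1) and (3000, 40, 4), a 10 ms target falls between them, so the
# thread count is interpolated first:
#     th3 = (4 - 1) / (40 - 8) * (10 - 8) + 1 ~= 1.19
# and IOPS follows from it:
#     iops3 = (3000 - 1000) / (4 - 1) * (1.19 - 1) + 1000 ~= 1125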
#
# @report('hdd', 'hdd')
# def make_hdd_report(processed_results, lab_info, comment):
#     plots = [
#         ('hdd_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#         ('hdd_rwx4k', 'rand_write_4k', 'Random write 4k sync IOPS')
#     ]
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     images = make_plots(perf_infos, plots)
#     di = get_disk_info(perf_infos)
#     return render_all_html(comment, di, lab_info, images, "report_hdd.html")
#
#
# @report('cinder_iscsi', 'cinder_iscsi')
# def make_cinder_iscsi_report(processed_results, lab_info, comment):
#     plots = [
#         ('cinder_iscsi_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#         ('cinder_iscsi_rwx4k', 'rand_write_4k', 'Random write 4k sync IOPS')
#     ]
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     try:
#         images = make_plots(perf_infos, plots)
#     except ValueError:
#         plots = [
#             ('cinder_iscsi_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#             ('cinder_iscsi_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS')
#         ]
#         images = make_plots(perf_infos, plots)
#     di = get_disk_info(perf_infos)
#
#     return render_all_html(comment, di, lab_info, images, "report_cinder_iscsi.html")
#
#
# @report('ceph', 'ceph')
# def make_ceph_report(processed_results, lab_info, comment):
#     plots = [
#         ('ceph_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
#         ('ceph_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS'),
#         ('ceph_rrd16m', 'rand_read_16m', 'Random read 16m direct MiBps'),
#         ('ceph_rwd16m', 'rand_write_16m',
#          'Random write 16m direct MiBps'),
#     ]
#
#     perf_infos = [res.disk_perf_info() for res in processed_results]
#     images = make_plots(perf_infos, plots)
#     di = get_disk_info(perf_infos)
#     return render_all_html(comment, di, lab_info, images, "report_ceph.html")
#
#
# @report('mixed', 'mixed')
# def make_mixed_report(processed_results, lab_info, comment):
#     #
#     # IOPS(X% read) = 100 / ( X / IOPS_R + (100 - X) / IOPS_W )
#     #
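#     # e.g. (illustrative numbers): IOPS_R = 20000, IOPS_W = 5000 and a 70%
#     # read mix give 100 / (70 / 20000 + 30 / 5000) ~= 10500 IOPS - this is
#     # a harmonic weighting, so the slower operation dominates the mix.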
726#
727# perf_infos = [res.disk_perf_info() for res in processed_results]
728# mixed = collections.defaultdict(lambda: [])
729#
730# is_ssd = False
731# for res in perf_infos:
732# if res.name.startswith('mixed'):
733# if res.name.startswith('mixed-ssd'):
734# is_ssd = True
735# mixed[res.concurence].append((res.p.rwmixread,
736# res.lat,
737# 0,
738# # res.lat.average / 1000.0,
739# # res.lat.deviation / 1000.0,
740# res.iops.average,
741# res.iops.deviation))
742#
743# if len(mixed) == 0:
744# raise ValueError("No mixed load found")
745#
746# fig, p1 = plt.subplots()
747# p2 = p1.twinx()
748#
749# colors = ['red', 'green', 'blue', 'orange', 'magenta', "teal"]
750# colors_it = iter(colors)
751# for conc, mix_lat_iops in sorted(mixed.items()):
752# mix_lat_iops = sorted(mix_lat_iops)
753# read_perc, lat, dev, iops, iops_dev = zip(*mix_lat_iops)
754# p1.errorbar(read_perc, iops, color=next(colors_it),
755# yerr=iops_dev, label=str(conc) + " th")
756#
757# p2.errorbar(read_perc, lat, color=next(colors_it),
758# ls='--', yerr=dev, label=str(conc) + " th lat")
759#
760# if is_ssd:
761# p1.set_yscale('log')
762# p2.set_yscale('log')
763#
764# p1.set_xlim(-5, 105)
765#
766# read_perc = set(read_perc)
767# read_perc.add(0)
768# read_perc.add(100)
769# read_perc = sorted(read_perc)
770#
771# plt.xticks(read_perc, map(str, read_perc))
772#
773# p1.grid(True)
774# p1.set_xlabel("% of reads")
775# p1.set_ylabel("Mixed IOPS")
776# p2.set_ylabel("Latency, ms")
777#
778# handles1, labels1 = p1.get_legend_handles_labels()
779# handles2, labels2 = p2.get_legend_handles_labels()
780# plt.subplots_adjust(top=0.85)
781# plt.legend(handles1 + handles2, labels1 + labels2,
782# bbox_to_anchor=(0.5, 1.15),
783# loc='upper center',
784# prop={'size': 12}, ncol=3)
785# plt.show()
786#
787#
# def make_load_report(idx, results_dir, fname):
#     dpath = os.path.join(results_dir, "io_" + str(idx))
#     files = sorted(os.listdir(dpath))
#     gf = lambda x: "_".join(x.rsplit(".", 1)[0].split('_')[:3])
#
#     for key, group in itertools.groupby(files, gf):
#         fname = os.path.join(dpath, key + ".fio")
#
#         cfgs = list(parse_all_in_1(open(fname).read(), fname))
#
#         fname = os.path.join(dpath, key + "_lat.log")
#
#         curr = []
#         arrays = []
#
#         with open(fname) as fd:
#             for offset, lat, _, _ in csv.reader(fd):
#                 offset = int(offset)
#                 lat = int(lat)
#                 if len(curr) > 0 and curr[-1][0] > offset:
#                     arrays.append(curr)
#                     curr = []
#                 curr.append((offset, lat))
#             arrays.append(curr)
#         conc = int(cfgs[0].vals.get('numjobs', 1))
#
#         if conc != 5:
#             continue
#
#         assert len(arrays) == len(cfgs) * conc
#
#         garrays = [[(0, 0)] for _ in range(conc)]
#
#         for offset in range(len(cfgs)):
#             for acc, new_arr in zip(garrays, arrays[offset * conc:(offset + 1) * conc]):
#                 last = acc[-1][0]
#                 for off, lat in new_arr:
#                     acc.append((off / 1000. + last, lat / 1000.))
#
#         for cfg, arr in zip(cfgs, garrays):
#             plt.plot(*zip(*arr[1:]))
#         plt.show()
#         exit(1)
#
#
# def make_io_report(dinfo, comment, path, lab_info=None):
#     lab_info = {
#         "total_disk": "None",
#         "total_memory": "None",
#         "nodes_count": "None",
#         "processor_count": "None"
#     }
#
#     try:
#         res_fields = sorted(v.name for v in dinfo)
#
#         found = False
#         for fields, name, func in report_funcs:
#             for field in fields:
#                 pos = bisect.bisect_left(res_fields, field)
#
#                 if pos == len(res_fields):
#                     break
#
#                 if not res_fields[pos].startswith(field):
#                     break
#             else:
#                 found = True
#                 hpath = path.format(name)
#
#                 try:
#                     report = func(dinfo, lab_info, comment)
#                 except:
#                     logger.exception("During {0} report generation".format(name))
#                     continue
#
#                 if report is not None:
#                     try:
#                         with open(hpath, "w") as fd:
#                             fd.write(report)
#                     except:
#                         logger.exception("While saving {0} report".format(name))
#                         continue
#                     logger.info("Report {0} saved into {1}".format(name, hpath))
#                 else:
#                     logger.warning("No report produced by {0!r}".format(name))
#
#         if not found:
#             logger.warning("No report generator found for this load")
#
#     except Exception as exc:
#         import traceback
#         traceback.print_exc()
#         logger.error("Failed to generate html report:" + str(exc))
#
#
# # @classmethod
# # def prepare_data(cls, results) -> List[Dict[str, Any]]:
# #     """create a table with io performance report for console"""
# #
# #     def key_func(data: FioRunResult) -> Tuple[str, str, str, str, int]:
# #         tpl = data.summary_tpl()
# #         return (data.name,
# #                 tpl.oper,
# #                 tpl.mode,
# #                 ssize2b(tpl.bsize),
# #                 int(tpl.th_count) * int(tpl.vm_count))
# #     res = []
# #
# #     for item in sorted(results, key=key_func):
# #         test_dinfo = item.disk_perf_info()
# #         testnodes_count = len(item.config.nodes)
# #
# #         iops, _ = test_dinfo.iops.rounded_average_conf()
# #
# #         if test_dinfo.iops_sys is not None:
# #             iops_sys, iops_sys_conf = test_dinfo.iops_sys.rounded_average_conf()
# #             _, iops_sys_dev = test_dinfo.iops_sys.rounded_average_dev()
# #             iops_sys_per_vm = round_3_digit(iops_sys / testnodes_count)
# #             iops_sys = round_3_digit(iops_sys)
# #         else:
# #             iops_sys = None
# #             iops_sys_per_vm = None
# #             iops_sys_dev = None
# #             iops_sys_conf = None
# #
# #         bw, bw_conf = test_dinfo.bw.rounded_average_conf()
# #         _, bw_dev = test_dinfo.bw.rounded_average_dev()
# #         conf_perc = int(round(bw_conf * 100 / bw))
# #         dev_perc = int(round(bw_dev * 100 / bw))
# #
# #         lat_50 = round_3_digit(int(test_dinfo.lat_50))
# #         lat_95 = round_3_digit(int(test_dinfo.lat_95))
# #         lat_avg = round_3_digit(int(test_dinfo.lat_avg))
# #
# #         iops_per_vm = round_3_digit(iops / testnodes_count)
# #         bw_per_vm = round_3_digit(bw / testnodes_count)
# #
# #         iops = round_3_digit(iops)
# #         bw = round_3_digit(bw)
# #
# #         summ = "{0.oper}{0.mode} {0.bsize:>4} {0.th_count:>3}th {0.vm_count:>2}vm".format(item.summary_tpl())
# #
# #         res.append({"name": key_func(item)[0],
# #                     "key": key_func(item)[:4],
# #                     "summ": summ,
# #                     "iops": int(iops),
# #                     "bw": int(bw),
# #                     "conf": str(conf_perc),
# #                     "dev": str(dev_perc),
# #                     "iops_per_vm": int(iops_per_vm),
# #                     "bw_per_vm": int(bw_per_vm),
# #                     "lat_50": lat_50,
# #                     "lat_95": lat_95,
# #                     "lat_avg": lat_avg,
# #
# #                     "iops_sys": iops_sys,
# #                     "iops_sys_per_vm": iops_sys_per_vm,
# #                     "sys_conf": iops_sys_conf,
# #                     "sys_dev": iops_sys_dev})
# #
# #     return res
# #
# # Field = collections.namedtuple("Field", ("header", "attr", "align", "size"))
# # fields_and_header = [
# #     Field("Name", "name", "l", 7),
# #     Field("Description", "summ", "l", 19),
# #     Field("IOPS\ncum", "iops", "r", 3),
# #     # Field("IOPS_sys\ncum", "iops_sys", "r", 3),
# #     Field("KiBps\ncum", "bw", "r", 6),
# #     Field("Cnf %\n95%", "conf", "r", 3),
# #     Field("Dev%", "dev", "r", 3),
# #     Field("iops\n/vm", "iops_per_vm", "r", 3),
# #     Field("KiBps\n/vm", "bw_per_vm", "r", 6),
# #     Field("lat ms\nmedian", "lat_50", "r", 3),
# #     Field("lat ms\n95%", "lat_95", "r", 3),
# #     Field("lat\navg", "lat_avg", "r", 3),
# # ]
# #
# # fields_and_header_dct = dict((item.attr, item) for item in fields_and_header)
# #
# # @classmethod
# # def format_for_console(cls, results) -> str:
# #     """create a table with io performance report for console"""
# #
# #     tab = texttable.Texttable(max_width=120)
# #     tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
# #     tab.set_cols_align([f.align for f in cls.fields_and_header])
# #     sep = ["-" * f.size for f in cls.fields_and_header]
# #     tab.header([f.header for f in cls.fields_and_header])
# #     prev_k = None
# #     for item in cls.prepare_data(results):
# #         if prev_k is not None:
# #             if prev_k != item["key"]:
# #                 tab.add_row(sep)
# #
# #         prev_k = item["key"]
# #         tab.add_row([item[f.attr] for f in cls.fields_and_header])
# #
# #     return tab.draw()
# #
# # @classmethod
# # def format_diff_for_console(cls, list_of_results: List[Any]) -> str:
# #     """create a table with io performance report for console"""
# #
# #     tab = texttable.Texttable(max_width=200)
# #     tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
# #
# #     header = [
# #         cls.fields_and_header_dct["name"].header,
# #         cls.fields_and_header_dct["summ"].header,
# #     ]
# #     align = ["l", "l"]
# #
# #     header.append("IOPS ~ Cnf% ~ Dev%")
# #     align.extend(["r"] * len(list_of_results))
# #     header.extend(
# #         "IOPS_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
# #     )
# #
# #     header.append("BW")
# #     align.extend(["r"] * len(list_of_results))
# #     header.extend(
# #         "BW_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
# #     )
# #
# #     header.append("LAT")
# #     align.extend(["r"] * len(list_of_results))
# #     header.extend(
# #         "LAT_{0}".format(i + 2) for i in range(len(list_of_results[1:]))
# #     )
# #
# #     tab.header(header)
# #     sep = ["-" * 3] * len(header)
# #     processed_results = map(cls.prepare_data, list_of_results)
# #
# #     key2results = []
# #     for res in processed_results:
# #         key2results.append(dict(
# #             ((item["name"], item["summ"]), item) for item in res
# #         ))
# #
# #     prev_k = None
# #     iops_frmt = "{0[iops]} ~ {0[conf]:>2} ~ {0[dev]:>2}"
# #     for item in processed_results[0]:
# #         if prev_k is not None:
# #             if prev_k != item["key"]:
# #                 tab.add_row(sep)
# #
# #         prev_k = item["key"]
# #
# #         key = (item['name'], item['summ'])
# #         line = list(key)
# #         base = key2results[0][key]
# #
# #         line.append(iops_frmt.format(base))
# #
# #         for test_results in key2results[1:]:
# #             val = test_results.get(key)
# #             if val is None:
# #                 line.append("-")
# #             elif base['iops'] == 0:
# #                 line.append("Nan")
# #             else:
# #                 prc_val = {'dev': val['dev'], 'conf': val['conf']}
# #                 prc_val['iops'] = int(100 * val['iops'] / base['iops'])
# #                 line.append(iops_frmt.format(prc_val))
# #
# #         line.append(base['bw'])
# #
# #         for test_results in key2results[1:]:
# #             val = test_results.get(key)
# #             if val is None:
# #                 line.append("-")
# #             elif base['bw'] == 0:
# #                 line.append("Nan")
# #             else:
# #                 line.append(int(100 * val['bw'] / base['bw']))
# #
# #         for test_results in key2results:
# #             val = test_results.get(key)
# #             if val is None:
# #                 line.append("-")
# #             else:
# #                 line.append("{0[lat_50]} - {0[lat_95]}".format(val))
# #
# #         tab.add_row(line)
# #
# #     tab.set_cols_align(align)
# #     return tab.draw()
#
#
# # READ_IOPS_DISCSTAT_POS = 3
# # WRITE_IOPS_DISCSTAT_POS = 7
# #
# #
# # def load_sys_log_file(ftype: str, fname: str) -> TimeSeriesValue:
# #     assert ftype == 'iops'
# #     pval = None
# #     with open(fname) as fd:
# #         iops = []
# #         for ln in fd:
# #             params = ln.split()
# #             cval = int(params[WRITE_IOPS_DISCSTAT_POS]) + \
# #                    int(params[READ_IOPS_DISCSTAT_POS])
# #             if pval is not None:
# #                 iops.append(cval - pval)
# #             pval = cval
# #
# #     vals = [(idx * 1000, val) for idx, val in enumerate(iops)]
# #     return TimeSeriesValue(vals)
# #
# #
# # def load_test_results(folder: str, run_num: int) -> 'FioRunResult':
# #     res = {}
# #     params = None
# #
# #     fn = os.path.join(folder, str(run_num) + '_params.yaml')
# #     params = yaml.load(open(fn).read())
# #
# #     conn_ids_set = set()
# #     rr = r"{}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.\d+\.log$".format(run_num)
# #     for fname in os.listdir(folder):
# #         rm = re.match(rr, fname)
# #         if rm is None:
# #             continue
# #
# #         conn_id_s = rm.group('conn_id')
# #         conn_id = conn_id_s.replace('_', ':')
# #         ftype = rm.group('type')
# #
# #         if ftype not in ('iops', 'bw', 'lat'):
# #             continue
# #
# #         ts = load_fio_log_file(os.path.join(folder, fname))
# #         res.setdefault(ftype, {}).setdefault(conn_id, []).append(ts)
# #
# #         conn_ids_set.add(conn_id)
# #
# #     rr = r"{}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.sys\.log$".format(run_num)
# #     for fname in os.listdir(folder):
# #         rm = re.match(rr, fname)
# #         if rm is None:
# #             continue
# #
# #         conn_id_s = rm.group('conn_id')
# #         conn_id = conn_id_s.replace('_', ':')
# #         ftype = rm.group('type')
# #
# #         if ftype not in ('iops', 'bw', 'lat'):
# #             continue
# #
# #         ts = load_sys_log_file(ftype, os.path.join(folder, fname))
# #         res.setdefault(ftype + ":sys", {}).setdefault(conn_id, []).append(ts)
# #
# #         conn_ids_set.add(conn_id)
# #
# #     mm_res = {}
# #
# #     if len(res) == 0:
# #         raise ValueError("No data was found")
# #
# #     for key, data in res.items():
# #         conn_ids = sorted(conn_ids_set)
# #         avail_ids = [conn_id for conn_id in conn_ids if conn_id in data]
# #         matr = [data[conn_id] for conn_id in avail_ids]
# #         mm_res[key] = MeasurementMatrix(matr, avail_ids)
# #
# #     raw_res = {}
# #     for conn_id in conn_ids:
# #         fn = os.path.join(folder, "{0}_{1}_rawres.json".format(run_num, conn_id_s))
# #
# #         # remove message hack
# #         fc = "{" + open(fn).read().split('{', 1)[1]
# #         raw_res[conn_id] = json.loads(fc)
# #
# #     fio_task = FioJobSection(params['name'])
# #     fio_task.vals.update(params['vals'])
# #
# #     config = TestConfig('io', params, None, params['nodes'], folder, None)
# #     return FioRunResult(config, fio_task, mm_res, raw_res, params['intervals'], run_num)
# #
#
# # class DiskPerfInfo:
# #     def __init__(self, name: str, summary: str, params: Dict[str, Any], testnodes_count: int) -> None:
# #         self.name = name
# #         self.bw = None
# #         self.iops = None
# #         self.lat = None
# #         self.lat_50 = None
# #         self.lat_95 = None
# #         self.lat_avg = None
# #
# #         self.raw_bw = []
# #         self.raw_iops = []
# #         self.raw_lat = []
# #
# #         self.params = params
# #         self.testnodes_count = testnodes_count
# #         self.summary = summary
# #
# #         self.sync_mode = get_test_sync_mode(self.params['vals'])
# #         self.concurence = self.params['vals'].get('numjobs', 1)
# #
# #
# # class IOTestResults:
# #     def __init__(self, suite_name: str, fio_results: 'FioRunResult', log_directory: str):
# #         self.suite_name = suite_name
# #         self.fio_results = fio_results
# #         self.log_directory = log_directory
# #
# #     def __iter__(self):
# #         return iter(self.fio_results)
# #
# #     def __len__(self):
# #         return len(self.fio_results)
# #
# #     def get_yamable(self) -> Dict[str, List[str]]:
# #         items = [(fio_res.summary(), fio_res.idx) for fio_res in self]
# #         return {self.suite_name: [self.log_directory] + items}
#
#
# # class FioRunResult(TestResults):
# #     """
# #     Fio run results
# #     config: TestConfig
# #     fio_task: FioJobSection
# #     ts_results: {str: MeasurementMatrix[TimeSeriesValue]}
# #     raw_result: ????
# #     run_interval: (float, float) - test run time, used for sensors
# #     """
# #     def __init__(self, config, fio_task, ts_results, raw_result, run_interval, idx):
# #
# #         self.name = fio_task.name.rsplit("_", 1)[0]
# #         self.fio_task = fio_task
# #         self.idx = idx
# #
# #         self.bw = ts_results['bw']
# #         self.lat = ts_results['lat']
# #         self.iops = ts_results['iops']
# #
# #         if 'iops:sys' in ts_results:
# #             self.iops_sys = ts_results['iops:sys']
# #         else:
# #             self.iops_sys = None
# #
# #         res = {"bw": self.bw,
# #                "lat": self.lat,
# #                "iops": self.iops,
# #                "iops:sys": self.iops_sys}
# #
# #         self.sensors_data = None
# #         self._pinfo = None
# #         TestResults.__init__(self, config, res, raw_result, run_interval)
# #
# #     def get_params_from_fio_report(self):
# #         nodes = self.bw.connections_ids
# #
# #         iops = [self.raw_result[node]['jobs'][0]['mixed']['iops'] for node in nodes]
# #         total_ios = [self.raw_result[node]['jobs'][0]['mixed']['total_ios'] for node in nodes]
# #         runtime = [self.raw_result[node]['jobs'][0]['mixed']['runtime'] / 1000 for node in nodes]
# #         flt_iops = [float(ios) / rtime for ios, rtime in zip(total_ios, runtime)]
# #
# #         bw = [self.raw_result[node]['jobs'][0]['mixed']['bw'] for node in nodes]
# #         total_bytes = [self.raw_result[node]['jobs'][0]['mixed']['io_bytes'] for node in nodes]
# #         flt_bw = [float(tbytes) / rtime for tbytes, rtime in zip(total_bytes, runtime)]
# #
# #         return {'iops': iops,
# #                 'flt_iops': flt_iops,
# #                 'bw': bw,
# #                 'flt_bw': flt_bw}
# #
# #     def summary(self):
# #         return get_test_summary(self.fio_task, len(self.config.nodes))
# #
# #     def summary_tpl(self):
# #         return get_test_summary_tuple(self.fio_task, len(self.config.nodes))
# #
# #     def get_lat_perc_50_95_multy(self):
# #         lat_mks = collections.defaultdict(lambda: 0)
# #         num_res = 0
# #
# #         for result in self.raw_result.values():
# #             num_res += len(result['jobs'])
# #             for job_info in result['jobs']:
# #                 for k, v in job_info['latency_ms'].items():
# #                     if isinstance(k, basestring) and k.startswith('>='):
# #                         lat_mks[int(k[2:]) * 1000] += v
# #                     else:
# #                         lat_mks[int(k) * 1000] += v
# #
# #                 for k, v in job_info['latency_us'].items():
# #                     lat_mks[int(k)] += v
# #
# #         for k, v in lat_mks.items():
# #             lat_mks[k] = float(v) / num_res
# #         return get_lat_perc_50_95(lat_mks)
# #
# #     def disk_perf_info(self, avg_interval=2.0):
# #
# #         if self._pinfo is not None:
# #             return self._pinfo
# #
# #         testnodes_count = len(self.config.nodes)
# #
# #         pinfo = DiskPerfInfo(self.name,
# #                              self.summary(),
# #                              self.params,
# #                              testnodes_count)
# #
# #         def prepare(data, drop=1):
# #             if data is None:
# #                 return data
# #
# #             res = []
# #             for ts_data in data:
# #                 if ts_data.average_interval() < avg_interval:
# #                     ts_data = ts_data.derived(avg_interval)
# #
# #                 # drop the last values on the bounds,
# #                 # as they may contain ranges without activity
# #                 assert len(ts_data.values) >= drop + 1, str(drop) + " " + str(ts_data.values)
# #
# #                 if drop > 0:
# #                     res.append(ts_data.values[:-drop])
# #                 else:
# #                     res.append(ts_data.values)
# #
# #             return res
# #
# #         def agg_data(matr):
# #             arr = sum(matr, [])
# #             min_len = min(map(len, arr))
# #             res = []
# #             for idx in range(min_len):
# #                 res.append(sum(dt[idx] for dt in arr))
# #             return res
# #
# #         pinfo.raw_lat = map(prepare, self.lat.per_vm())
# #         num_th = sum(map(len, pinfo.raw_lat))
# #         lat_avg = [val / num_th for val in agg_data(pinfo.raw_lat)]
# #         pinfo.lat_avg = data_property(lat_avg).average / 1000  # us to ms
# #
# #         pinfo.lat_50, pinfo.lat_95 = self.get_lat_perc_50_95_multy()
# #         pinfo.lat = pinfo.lat_50
# #
# #         pinfo.raw_bw = map(prepare, self.bw.per_vm())
# #         pinfo.raw_iops = map(prepare, self.iops.per_vm())
# #
# #         if self.iops_sys is not None:
# #             pinfo.raw_iops_sys = map(prepare, self.iops_sys.per_vm())
# #             pinfo.iops_sys = data_property(agg_data(pinfo.raw_iops_sys))
# #         else:
# #             pinfo.raw_iops_sys = None
# #             pinfo.iops_sys = None
# #
# #         fparams = self.get_params_from_fio_report()
# #         fio_report_bw = sum(fparams['flt_bw'])
# #         fio_report_iops = sum(fparams['flt_iops'])
# #
# #         agg_bw = agg_data(pinfo.raw_bw)
# #         agg_iops = agg_data(pinfo.raw_iops)
# #
# #         log_bw_avg = average(agg_bw)
# #         log_iops_avg = average(agg_iops)
# #
# #         # update values to match the averages from the fio report
# #         coef_iops = fio_report_iops / float(log_iops_avg)
# #         coef_bw = fio_report_bw / float(log_bw_avg)
# #
# #         bw_log = data_property([val * coef_bw for val in agg_bw])
# #         iops_log = data_property([val * coef_iops for val in agg_iops])
# #
# #         bw_report = data_property([fio_report_bw])
# #         iops_report = data_property([fio_report_iops])
# #
# #         # when IOPS/BW per thread is too low,
# #         # the data from the logs is rounded too coarsely to be used
# #         iops_per_th = sum(sum(pinfo.raw_iops, []), [])
# #         if average(iops_per_th) > 10:
# #             pinfo.iops = iops_log
# #             pinfo.iops2 = iops_report
# #         else:
# #             pinfo.iops = iops_report
# #             pinfo.iops2 = iops_log
# #
# #         bw_per_th = sum(sum(pinfo.raw_bw, []), [])
# #         if average(bw_per_th) > 10:
# #             pinfo.bw = bw_log
# #             pinfo.bw2 = bw_report
# #         else:
# #             pinfo.bw = bw_report
# #             pinfo.bw2 = bw_log
# #
# #         self._pinfo = pinfo
# #
# #         return pinfo
#
# # class TestResult:
# #     """Holds all information for a given test - test info,
# #     sensors data and performance results for the test period from all nodes"""
# #     run_id = None  # type: int
# #     test_info = None  # type: Any
# #     begin_time = None  # type: int
# #     end_time = None  # type: int
# #     sensors = None  # Dict[Tuple[str, str, str], TimeSeries]
# #     performance = None  # Dict[Tuple[str, str], TimeSeries]
# #
# # class TestResults:
# #     """
# #     this class describes test results
# #
# #     config: TestConfig - test config object
# #     params: dict - parameters from yaml file for this test
# #     results: {str: MeasurementMesh} - test results object
# #     raw_result: Any - opaque object to store raw results
# #     run_interval: (float, float) - test run time, used for sensors
# #     """
# #
# #     def __init__(self,
# #                  config: TestConfig,
# #                  results: Dict[str, Any],
# #                  raw_result: Any,
# #                  run_interval: Tuple[float, float]) -> None:
# #         self.config = config
# #         self.params = config.params
# #         self.results = results
# #         self.raw_result = raw_result
# #         self.run_interval = run_interval
# #
# #     def __str__(self) -> str:
# #         res = "{0}({1}):\n    results:\n".format(
# #             self.__class__.__name__,
# #             self.summary())
# #
# #         for name, val in self.results.items():
# #             res += "        {0}={1}\n".format(name, val)
# #
# #         res += "    params:\n"
# #
# #         for name, val in self.params.items():
# #             res += "        {0}={1}\n".format(name, val)
# #
# #         return res
# #
# #     def summary(self) -> str:
# #         raise NotImplementedError()
# #         return ""
# #
# #     def get_yamable(self) -> Any:
# #         raise NotImplementedError()
# #         return None
#
#
#
# # class MeasurementMatrix:
# #     """
# #     data: [[MeasurementResult]] - VM_COUNT x TH_COUNT matrix of MeasurementResult
# #     """
# #     def __init__(self, data, connections_ids):
# #         self.data = data
# #         self.connections_ids = connections_ids
# #
# #     def per_vm(self):
# #         return self.data
# #
# #     def per_th(self):
# #         return sum(self.data, [])
#
#
# # class MeasurementResults:
# #     data = None  # type: List[Any]
# #
# #     def stat(self) -> StatProps:
# #         return data_property(self.data)
# #
# #     def __str__(self) -> str:
# #         return 'TS([' + ", ".join(map(str, self.data)) + '])'
# #
# #
# # class SimpleVals(MeasurementResults):
# #     """
# #     data: [float] - list of values
# #     """
# #     def __init__(self, data: List[float]) -> None:
# #         self.data = data
# #
# #
# # class TimeSeriesValue(MeasurementResults):
# #     """
# #     data: [(float, float, float)] - list of (start_time, length, average_value_for_interval)
# #     odata: original values
# #     """
# #     def __init__(self, data: List[Tuple[float, float]]) -> None:
# #         assert len(data) > 0
# #         self.odata = data[:]
# #         self.data = []  # type: List[Tuple[float, float, float]]
# #
# #         cstart = 0.0
# #         for nstart, nval in data:
# #             self.data.append((cstart, nstart - cstart, nval))
# #             cstart = nstart
# #
# #     @property
# #     def values(self) -> List[float]:
# #         return [val[2] for val in self.data]
# #
# #     def average_interval(self) -> float:
# #         return float(sum([val[1] for val in self.data])) / len(self.data)
# #
# #     def skip(self, seconds) -> 'TimeSeriesValue':
# #         nres = []
# #         for start, ln, val in self.data:
# #             nstart = start + ln - seconds
# #             if nstart > 0:
# #                 nres.append([nstart, val])
# #         return self.__class__(nres)
# #
# #     def derived(self, tdelta) -> 'TimeSeriesValue':
# #         end = self.data[-1][0] + self.data[-1][1]
# #         tdelta = float(tdelta)
# #
# #         ln = end / tdelta
# #
# #         if ln - int(ln) > 0:
# #             ln += 1
# #
# #         res = [[tdelta * i, 0.0] for i in range(int(ln))]
# #
# #         for start, length, val in self.data:
# #             start_idx = int(start / tdelta)
# #             end_idx = int((start + length) / tdelta)
# #
# #             for idx in range(start_idx, end_idx + 1):
# #                 rstart = tdelta * idx
# #                 rend = tdelta * (idx + 1)
# #
# #                 intersection_ln = min(rend, start + length) - max(start, rstart)
# #                 if intersection_ln > 0:
# #                     try:
# #                         res[idx][1] += val * intersection_ln / tdelta
# #                     except IndexError:
# #                         raise
# #
# #         return self.__class__(res)
#
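# # Illustrative behaviour of derived() above (made-up numbers): the input
# # [(1.0, 10), (3.0, 20)] describes value 10 over [0, 1) and value 20 over
# # [1, 3); derived(1.0) redistributes it onto a one-second grid by
# # overlap-weighted averaging, producing the values [10, 20, 20].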
#
# def console_report_stage(ctx: TestRun) -> None:
#     # TODO(koder): load data from storage
#     raise NotImplementedError("...")
#     # first_report = True
#     # text_rep_fname = ctx.config.text_report_file
#     #
#     # with open(text_rep_fname, "w") as fd:
#     #     for tp, data in ctx.results.items():
#     #         if 'io' == tp and data is not None:
#     #             rep_lst = []
#     #             for result in data:
#     #                 rep_lst.append(
#     #                     IOPerfTest.format_for_console(list(result)))
#     #             rep = "\n\n".join(rep_lst)
#     #         elif tp in ['mysql', 'pgbench'] and data is not None:
#     #             rep = MysqlTest.format_for_console(data)
#     #         elif tp == 'omg':
#     #             rep = OmgTest.format_for_console(data)
#     #         else:
#     #             logger.warning("Can't generate text report for " + tp)
#     #             continue
#     #
#     #         fd.write(rep)
#     #         fd.write("\n")
#     #
#     #         if first_report:
#     #             logger.info("Text report was stored in " + text_rep_fname)
#     #             first_report = False
#     #
#     #         print("\n" + rep + "\n")
#
#
# # def test_load_report_stage(cfg: Config, ctx: TestRun) -> None:
# #     load_rep_fname = cfg.load_report_file
# #     found = False
# #     for idx, (tp, data) in enumerate(ctx.results.items()):
# #         if 'io' == tp and data is not None:
# #             if found:
# #                 logger.error("Making reports for more than one " +
# #                              "io block isn't supported! All " +
# #                              "reports except the first are skipped")
# #                 continue
# #             found = True
# #             report.make_load_report(idx, cfg['results'], load_rep_fname)
# #
# #
#
# # def html_report_stage(ctx: TestRun) -> None:
# #     # TODO(koder): load data from storage
# #     raise NotImplementedError("...")
# #     # html_rep_fname = cfg.html_report_file
# #     # found = False
# #     # for tp, data in ctx.results.items():
# #     #     if 'io' == tp and data is not None:
# #     #         if found or len(data) > 1:
# #     #             logger.error("Making reports for more than one " +
# #     #                          "io block isn't supported! All " +
# #     #                          "reports except the first are skipped")
# #     #             continue
# #     #         found = True
# #     #         report.make_io_report(list(data[0]),
# #     #                               cfg.get('comment', ''),
# #     #                               html_rep_fname,
# #     #                               lab_info=ctx.nodes)
#
# #
# # def load_data_from_path(test_res_dir: str) -> Mapping[str, List[Any]]:
# #     files = get_test_files(test_res_dir)
# #     raw_res = yaml_load(open(files['raw_results']).read())
# #     res = collections.defaultdict(list)
# #
# #     for tp, test_lists in raw_res:
# #         for tests in test_lists:
# #             for suite_name, suite_data in tests.items():
# #                 result_folder = suite_data[0]
# #                 res[tp].append(TOOL_TYPE_MAPPER[tp].load(suite_name, result_folder))
# #
# #     return res
# #
# #
# # def load_data_from_path_stage(var_dir: str, _, ctx: TestRun) -> None:
# #     for tp, vals in load_data_from_path(var_dir).items():
# #         ctx.results.setdefault(tp, []).extend(vals)
# #
# #
# # def load_data_from(var_dir: str) -> Callable[[TestRun], None]:
# #     return functools.partial(load_data_from_path_stage, var_dir)