import os
import csv
import abc
import bisect
import logging
import itertools
import collections
from io import StringIO
from typing import Dict, Any, Iterator, Tuple, cast, List

try:
    import numpy
    import scipy
    import matplotlib
    matplotlib.use('svg')
    import matplotlib.pyplot as plt
except ImportError:
    plt = None

import wally
from .utils import ssize2b
from .storage import Storage
from .stage import Stage, StepOrder
from .test_run_class import TestRun
from .result_classes import TestInfo, FullTestResult, SensorInfo
from .suits.io.fio_task_parser import (get_test_sync_mode,
                                       get_test_summary,
                                       parse_all_in_1,
                                       abbv_name_to_full)


logger = logging.getLogger("wally")


def load_test_results(storage: Storage) -> Iterator[FullTestResult]:
    raise NotImplementedError()
    # sensors_data = {}  # type: Dict[Tuple[str, str, str], SensorInfo]
    #
    # mstorage = storage.sub_storage("metric")
    # for _, node_id in mstorage.list():
    #     for _, dev_name in mstorage.list(node_id):
    #         for _, sensor_name in mstorage.list(node_id, dev_name):
    #             key = (node_id, dev_name, sensor_name)
    #             si = SensorInfo(*key)
    #             si.begin_time, si.end_time, si.data = storage[node_id, dev_name, sensor_name]  # type: ignore
    #             sensors_data[key] = si
    #
    # rstorage = storage.sub_storage("result")
    # for _, run_id in rstorage.list():
    #     ftr = FullTestResult()
    #     ftr.test_info = rstorage.load(TestInfo, run_id, "info")
    #     ftr.performance_data = {}
    #
    #     p1 = "{}/measurement".format(run_id)
    #     for _, node_id in rstorage.list(p1):
    #         for _, measurement_name in rstorage.list(p1, node_id):
    #             perf_key = (node_id, measurement_name)
    #             ftr.performance_data[perf_key] = rstorage["{}/{}/{}".format(p1, *perf_key)]  # type: ignore
    #
    #     yield ftr


class ConsoleReportStage(Stage):

    priority = StepOrder.REPORT

    def run(self, ctx: TestRun) -> None:
        # TODO(koder): load data from storage
        raise NotImplementedError("...")


class HtmlReportStage(Stage):

    priority = StepOrder.REPORT

    def run(self, ctx: TestRun) -> None:
        # TODO(koder): load data from storage
        raise NotImplementedError("...")


# TODO: needs to be revised, has to use StatProps fields instead
class StoragePerfSummary:
    def __init__(self, name: str) -> None:
        self.name = name
        self.direct_iops_r_max = 0  # type: int
        self.direct_iops_w_max = 0  # type: int

        # 64k is used instead of 4k to fill the caches faster
        self.direct_iops_w64_max = 0  # type: int

        self.rws4k_10ms = 0  # type: int
        self.rws4k_30ms = 0  # type: int
        self.rws4k_100ms = 0  # type: int
        self.bw_write_max = 0  # type: int
        self.bw_read_max = 0  # type: int

        self.bw = None  # type: float
        self.iops = None  # type: float
        self.lat = None  # type: float
        self.lat_50 = None  # type: float
        self.lat_95 = None  # type: float


class HTMLBlock:
    data = None  # type: str
    js_links = []  # type: List[str]
    css_links = []  # type: List[str]


class Reporter(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def get_divs(self, config, storage) -> Iterator[Tuple[str, str, HTMLBlock]]:
        pass

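
# A minimal sketch of a concrete reporter, to illustrate the get_divs
# contract above. The (section, title) meaning of the first two tuple
# elements is an assumption, and ExampleSummary is not part of wally:
#
# class ExampleSummary(Reporter):
#     """Renders a single static HTML block"""
#     def get_divs(self, config, storage) -> Iterator[Tuple[str, str, HTMLBlock]]:
#         block = HTMLBlock()
#         block.data = "<div>...</div>"
#         yield ('performance', 'Example summary', block)
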

# Main performance report
class PerformanceSummary(Reporter):
    """Main performance report"""


# Main performance report
class IOPS_QD(Reporter):
    """Creates graphs showing how IOPS and latency depend on queue depth (QD)"""


# Linearization report
class IOPS_Bsize(Reporter):
    """Creates graphs showing how IOPS and latency depend on block size"""


# IOPS/latency distribution
class IOPSHist(Reporter):
    """IOPS/latency distribution histogram"""


# IOPS/latency over test time
class IOPSTime(Reporter):
    """IOPS/latency during the test"""


# Cluster load over test time
class ClusterLoad(Reporter):
    """Cluster load during the test"""


# Node load over test time
class NodeLoad(Reporter):
    """Node load during the test"""


# Ceph cluster summary
class CephClusterSummary(Reporter):
    """Ceph cluster summary"""


# TODO: Resource consumption report
# TODO: Ceph operation breakout report
# TODO: Resource consumption for different types of test


#
# # disk_info = None
# # base = None
# # linearity = None
#
#
# def group_by_name(test_data):
#     name_map = collections.defaultdict(lambda: [])
#
#     for data in test_data:
#         name_map[(data.name, data.summary())].append(data)
#
#     return name_map
#
#
# def report(name, required_fields):
#     def closure(func):
#         report_funcs.append((required_fields.split(","), name, func))
#         return func
#     return closure
#
#
# def get_test_lcheck_params(pinfo):
#     res = [{
#         's': 'sync',
#         'd': 'direct',
#         'a': 'async',
#         'x': 'sync direct'
#     }[pinfo.sync_mode]]
#
#     res.append(pinfo.p.rw)
#
#     return " ".join(res)
#
#
# def get_emb_data_svg(plt):
#     sio = StringIO()
#     plt.savefig(sio, format='svg')
#     img_start = "<!-- Created with matplotlib (http://matplotlib.org/) -->"
#     return sio.getvalue().split(img_start, 1)[1]
#
#
# def get_template(templ_name):
#     very_root_dir = os.path.dirname(os.path.dirname(wally.__file__))
#     templ_dir = os.path.join(very_root_dir, 'report_templates')
#     templ_file = os.path.join(templ_dir, templ_name)
#     return open(templ_file, 'r').read()
#
#
# def group_by(data, func):
#     if len(data) < 2:
#         yield data
#         return
#
#     ndata = [(func(dt), dt) for dt in data]
#     ndata.sort(key=lambda x: x[0])
#     pkey, dt = ndata[0]
#     curr_list = [dt]
#
#     for key, val in ndata[1:]:
#         if pkey != key:
#             yield curr_list
#             curr_list = [val]
#         else:
#             curr_list.append(val)
#         pkey = key
#
#     yield curr_list
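#
# # Example of group_by semantics (hypothetical values): items are sorted
# # by key, then each run of equal keys is emitted as its own list:
# #     list(group_by([1, 1, 2, 2, 3], func=lambda x: x)) == [[1, 1], [2, 2], [3]]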
228#
229#
230# @report('linearity', 'linearity_test')
231# def linearity_report(processed_results, lab_info, comment):
232# labels_and_data_mp = collections.defaultdict(lambda: [])
233# vls = {}
234#
235# # plot io_time = func(bsize)
236# for res in processed_results.values():
237# if res.name.startswith('linearity_test'):
238# iotimes = [1000. / val for val in res.iops.raw]
239#
240# op_summ = get_test_summary(res.params)[:3]
241#
242# labels_and_data_mp[op_summ].append(
243# [res.p.blocksize, res.iops.raw, iotimes])
244#
245# cvls = res.params.vals.copy()
246# del cvls['blocksize']
247# del cvls['rw']
248#
249# cvls.pop('sync', None)
250# cvls.pop('direct', None)
251# cvls.pop('buffered', None)
252#
253# if op_summ not in vls:
254# vls[op_summ] = cvls
255# else:
256# assert cvls == vls[op_summ]
257#
258# all_labels = None
259# _, ax1 = plt.subplots()
260# for name, labels_and_data in labels_and_data_mp.items():
261# labels_and_data.sort(key=lambda x: ssize2b(x[0]))
262#
263# labels, _, iotimes = zip(*labels_and_data)
264#
265# if all_labels is None:
266# all_labels = labels
267# else:
268# assert all_labels == labels
269#
270# plt.boxplot(iotimes)
271# if len(labels_and_data) > 2 and \
272# ssize2b(labels_and_data[-2][0]) >= 4096:
273#
274# xt = range(1, len(labels) + 1)
275#
276# def io_time(sz, bw, initial_lat):
277# return sz / bw + initial_lat
278#
279# x = numpy.array(map(ssize2b, labels))
280# y = numpy.array([sum(dt) / len(dt) for dt in iotimes])
281# popt, _ = scipy.optimize.curve_fit(io_time, x, y, p0=(100., 1.))
282#
283# y1 = io_time(x, *popt)
284# plt.plot(xt, y1, linestyle='--',
285# label=name + ' LS linear approx')
286#
287# for idx, (sz, _, _) in enumerate(labels_and_data):
288# if ssize2b(sz) >= 4096:
289# break
290#
291# bw = (x[-1] - x[idx]) / (y[-1] - y[idx])
292# lat = y[-1] - x[-1] / bw
293# y2 = io_time(x, bw, lat)
294# plt.plot(xt, y2, linestyle='--',
295# label=abbv_name_to_full(name) +
296# ' (4k & max) linear approx')
297#
298# plt.setp(ax1, xticklabels=labels)
299#
300# plt.xlabel("Block size")
301# plt.ylabel("IO time, ms")
302#
303# plt.subplots_adjust(top=0.85)
304# plt.legend(bbox_to_anchor=(0.5, 1.15),
305# loc='upper center',
306# prop={'size': 10}, ncol=2)
307# plt.grid()
308# iotime_plot = get_emb_data_svg(plt)
309# plt.clf()
310#
311# # plot IOPS = func(bsize)
312# _, ax1 = plt.subplots()
313#
314# for name, labels_and_data in labels_and_data_mp.items():
315# labels_and_data.sort(key=lambda x: ssize2b(x[0]))
316# _, data, _ = zip(*labels_and_data)
317# plt.boxplot(data)
318# avg = [float(sum(arr)) / len(arr) for arr in data]
319# xt = range(1, len(data) + 1)
320# plt.plot(xt, avg, linestyle='--',
321# label=abbv_name_to_full(name) + " avg")
322#
323# plt.setp(ax1, xticklabels=labels)
324# plt.xlabel("Block size")
325# plt.ylabel("IOPS")
326# plt.legend(bbox_to_anchor=(0.5, 1.15),
327# loc='upper center',
328# prop={'size': 10}, ncol=2)
329# plt.grid()
330# plt.subplots_adjust(top=0.85)
331#
332# iops_plot = get_emb_data_svg(plt)
333#
334# res = set(get_test_lcheck_params(res) for res in processed_results.values())
335# ncount = list(set(res.testnodes_count for res in processed_results.values()))
336# conc = list(set(res.concurence for res in processed_results.values()))
337#
338# assert len(conc) == 1
339# assert len(ncount) == 1
340#
341# descr = {
342# 'vm_count': ncount[0],
343# 'concurence': conc[0],
344# 'oper_descr': ", ".join(res).capitalize()
345# }
346#
347# params_map = {'iotime_vs_size': iotime_plot,
348# 'iops_vs_size': iops_plot,
349# 'descr': descr}
350#
351# return get_template('report_linearity.html').format(**params_map)
352#
353#
354# @report('lat_vs_iops', 'lat_vs_iops')
355# def lat_vs_iops(processed_results, lab_info, comment):
356# lat_iops = collections.defaultdict(lambda: [])
357# requsted_vs_real = collections.defaultdict(lambda: {})
358#
359# for res in processed_results.values():
360# if res.name.startswith('lat_vs_iops'):
361# lat_iops[res.concurence].append((res.lat,
362# 0,
363# res.iops.average,
364# res.iops.deviation))
365# # lat_iops[res.concurence].append((res.lat.average / 1000.0,
366# # res.lat.deviation / 1000.0,
367# # res.iops.average,
368# # res.iops.deviation))
369# requested_iops = res.p.rate_iops * res.concurence
370# requsted_vs_real[res.concurence][requested_iops] = \
371# (res.iops.average, res.iops.deviation)
372#
373# colors = ['red', 'green', 'blue', 'orange', 'magenta', "teal"]
374# colors_it = iter(colors)
375# for conc, lat_iops in sorted(lat_iops.items()):
376# lat, dev, iops, iops_dev = zip(*lat_iops)
377# plt.errorbar(iops, lat, xerr=iops_dev, yerr=dev, fmt='ro',
378# label=str(conc) + " threads",
379# color=next(colors_it))
380#
381# plt.xlabel("IOPS")
382# plt.ylabel("Latency, ms")
383# plt.grid()
384# plt.legend(loc=0)
385# plt_iops_vs_lat = get_emb_data_svg(plt)
386# plt.clf()
387#
388# colors_it = iter(colors)
389# for conc, req_vs_real in sorted(requsted_vs_real.items()):
390# req, real = zip(*sorted(req_vs_real.items()))
391# iops, dev = zip(*real)
392# plt.errorbar(req, iops, yerr=dev, fmt='ro',
393# label=str(conc) + " threads",
394# color=next(colors_it))
395# plt.xlabel("Requested IOPS")
396# plt.ylabel("Get IOPS")
397# plt.grid()
398# plt.legend(loc=0)
399# plt_iops_vs_requested = get_emb_data_svg(plt)
400#
401# res1 = processed_results.values()[0]
402# params_map = {'iops_vs_lat': plt_iops_vs_lat,
403# 'iops_vs_requested': plt_iops_vs_requested,
404# 'oper_descr': get_test_lcheck_params(res1).capitalize()}
405#
406# return get_template('report_iops_vs_lat.html').format(**params_map)
407#
408#
409# def render_all_html(comment, info, lab_description, images, templ_name):
410# data = info.__dict__.copy()
411# for name, val in data.items():
412# if not name.startswith('__'):
413# if val is None:
414# if name in ('direct_iops_w64_max', 'direct_iops_w_max'):
415# data[name] = ('-', '-', '-')
416# else:
417# data[name] = '-'
418# elif isinstance(val, (int, float, long)):
419# data[name] = round_3_digit(val)
420#
421# data['bw_read_max'] = (data['bw_read_max'][0] // 1024,
422# data['bw_read_max'][1],
423# data['bw_read_max'][2])
424#
425# data['bw_write_max'] = (data['bw_write_max'][0] // 1024,
426# data['bw_write_max'][1],
427# data['bw_write_max'][2])
428#
429# images.update(data)
430# templ = get_template(templ_name)
431# return templ.format(lab_info=lab_description,
432# comment=comment,
433# **images)
434#
435#
436# def io_chart(title, concurence,
437# latv, latv_min, latv_max,
438# iops_or_bw, iops_or_bw_err,
439# legend,
440# log_iops=False,
441# log_lat=False,
442# boxplots=False,
443# latv_50=None,
444# latv_95=None,
445# error2=None):
446#
447# matplotlib.rcParams.update({'font.size': 10})
448# points = " MiBps" if legend == 'BW' else ""
449# lc = len(concurence)
450# width = 0.35
451# xt = range(1, lc + 1)
452#
453# op_per_vm = [v / (vm * th) for v, (vm, th) in zip(iops_or_bw, concurence)]
454# fig, p1 = plt.subplots()
455# xpos = [i - width / 2 for i in xt]
456#
457# p1.bar(xpos, iops_or_bw,
458# width=width,
459# color='y',
460# label=legend)
461#
462# err1_leg = None
463# for pos, y, err in zip(xpos, iops_or_bw, iops_or_bw_err):
464# err1_leg = p1.errorbar(pos + width / 2,
465# y,
466# err,
467# color='magenta')
468#
469# err2_leg = None
470# if error2 is not None:
471# for pos, y, err in zip(xpos, iops_or_bw, error2):
472# err2_leg = p1.errorbar(pos + width / 2 + 0.08,
473# y,
474# err,
475# lw=2,
476# alpha=0.5,
477# color='teal')
478#
479# p1.grid(True)
480# p1.plot(xt, op_per_vm, '--', label=legend + "/thread", color='black')
481# handles1, labels1 = p1.get_legend_handles_labels()
482#
483# handles1 += [err1_leg]
484# labels1 += ["95% conf"]
485#
486# if err2_leg is not None:
487# handles1 += [err2_leg]
488# labels1 += ["95% dev"]
489#
490# p2 = p1.twinx()
491#
492# if latv_50 is None:
493# p2.plot(xt, latv_max, label="lat max")
494# p2.plot(xt, latv, label="lat avg")
495# p2.plot(xt, latv_min, label="lat min")
496# else:
497# p2.plot(xt, latv_50, label="lat med")
498# p2.plot(xt, latv_95, label="lat 95%")
499#
500# plt.xlim(0.5, lc + 0.5)
501# plt.xticks(xt, ["{0} * {1}".format(vm, th) for (vm, th) in concurence])
502# p1.set_xlabel("VM Count * Thread per VM")
503# p1.set_ylabel(legend + points)
504# p2.set_ylabel("Latency ms")
505# plt.title(title)
506# handles2, labels2 = p2.get_legend_handles_labels()
507#
508# plt.legend(handles1 + handles2, labels1 + labels2,
509# loc='center left', bbox_to_anchor=(1.1, 0.81))
510#
511# if log_iops:
512# p1.set_yscale('log')
513#
514# if log_lat:
515# p2.set_yscale('log')
516#
517# plt.subplots_adjust(right=0.68)
518#
519# return get_emb_data_svg(plt)
520#
521#
522# def make_plots(processed_results, plots):
523# """
524# processed_results: [PerfInfo]
525# plots = [(test_name_prefix:str, fname:str, description:str)]
526# """
527# files = {}
528# for name_pref, fname, desc in plots:
529# chart_data = []
530#
531# for res in processed_results:
532# summ = res.name + "_" + res.summary
533# if summ.startswith(name_pref):
534# chart_data.append(res)
535#
536# if len(chart_data) == 0:
537# raise ValueError("Can't found any date for " + name_pref)
538#
539# use_bw = ssize2b(chart_data[0].p.blocksize) > 16 * 1024
540#
541# chart_data.sort(key=lambda x: x.params['vals']['numjobs'])
542#
543# lat = None
544# lat_min = None
545# lat_max = None
546#
547# lat_50 = [x.lat_50 for x in chart_data]
548# lat_95 = [x.lat_95 for x in chart_data]
549#
550# lat_diff_max = max(x.lat_95 / x.lat_50 for x in chart_data)
551# lat_log_scale = (lat_diff_max > 10)
552#
553# testnodes_count = x.testnodes_count
554# concurence = [(testnodes_count, x.concurence)
555# for x in chart_data]
556#
557# if use_bw:
558# data = [x.bw.average / 1000 for x in chart_data]
559# data_conf = [x.bw.confidence / 1000 for x in chart_data]
560# data_dev = [x.bw.deviation * 2.5 / 1000 for x in chart_data]
561# name = "BW"
562# else:
563# data = [x.iops.average for x in chart_data]
564# data_conf = [x.iops.confidence for x in chart_data]
565# data_dev = [x.iops.deviation * 2 for x in chart_data]
566# name = "IOPS"
567#
568# fc = io_chart(title=desc,
569# concurence=concurence,
570#
571# latv=lat,
572# latv_min=lat_min,
573# latv_max=lat_max,
574#
575# iops_or_bw=data,
576# iops_or_bw_err=data_conf,
577#
578# legend=name,
579# log_lat=lat_log_scale,
580#
581# latv_50=lat_50,
582# latv_95=lat_95,
583#
584# error2=data_dev)
585# files[fname] = fc
586#
587# return files
588#
589#
590# def find_max_where(processed_results, sync_mode, blocksize, rw, iops=True):
591# result = None
592# attr = 'iops' if iops else 'bw'
593# for measurement in processed_results:
594# ok = measurement.sync_mode == sync_mode
595# ok = ok and (measurement.p.blocksize == blocksize)
596# ok = ok and (measurement.p.rw == rw)
597#
598# if ok:
599# field = getattr(measurement, attr)
600#
601# if result is None:
602# result = field
603# elif field.average > result.average:
604# result = field
605#
606# return result
607#
608#
609# def get_disk_info(processed_results):
610# di = DiskInfo()
611# di.direct_iops_w_max = find_max_where(processed_results,
612# 'd', '4k', 'randwrite')
613# di.direct_iops_r_max = find_max_where(processed_results,
614# 'd', '4k', 'randread')
615#
616# di.direct_iops_w64_max = find_max_where(processed_results,
617# 'd', '64k', 'randwrite')
618#
619# for sz in ('16m', '64m'):
620# di.bw_write_max = find_max_where(processed_results,
621# 'd', sz, 'randwrite', False)
622# if di.bw_write_max is not None:
623# break
624#
625# if di.bw_write_max is None:
626# for sz in ('1m', '2m', '4m', '8m'):
627# di.bw_write_max = find_max_where(processed_results,
628# 'd', sz, 'write', False)
629# if di.bw_write_max is not None:
630# break
631#
632# for sz in ('16m', '64m'):
633# di.bw_read_max = find_max_where(processed_results,
634# 'd', sz, 'randread', False)
635# if di.bw_read_max is not None:
636# break
637#
638# if di.bw_read_max is None:
639# di.bw_read_max = find_max_where(processed_results,
640# 'd', '1m', 'read', False)
641#
642# rws4k_iops_lat_th = []
643# for res in processed_results:
644# if res.sync_mode in 'xs' and res.p.blocksize == '4k':
645# if res.p.rw != 'randwrite':
646# continue
647# rws4k_iops_lat_th.append((res.iops.average,
648# res.lat,
649# # res.lat.average,
650# res.concurence))
651#
652# rws4k_iops_lat_th.sort(key=lambda x: x[2])
653#
654# latv = [lat for _, lat, _ in rws4k_iops_lat_th]
655#
656# for tlat in [10, 30, 100]:
657# pos = bisect.bisect_left(latv, tlat)
658# if 0 == pos:
659# setattr(di, 'rws4k_{}ms'.format(tlat), 0)
660# elif pos == len(latv):
661# iops3, _, _ = rws4k_iops_lat_th[-1]
662# iops3 = int(round_3_digit(iops3))
663# setattr(di, 'rws4k_{}ms'.format(tlat), ">=" + str(iops3))
664# else:
665# lat1 = latv[pos - 1]
666# lat2 = latv[pos]
667#
668# iops1, _, th1 = rws4k_iops_lat_th[pos - 1]
669# iops2, _, th2 = rws4k_iops_lat_th[pos]
670#
671# th_lat_coef = (th2 - th1) / (lat2 - lat1)
672# th3 = th_lat_coef * (tlat - lat1) + th1
673#
674# th_iops_coef = (iops2 - iops1) / (th2 - th1)
675# iops3 = th_iops_coef * (th3 - th1) + iops1
676# iops3 = int(round_3_digit(iops3))
677# setattr(di, 'rws4k_{}ms'.format(tlat), iops3)
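#             # Worked example (hypothetical numbers): with neighbouring
#             # samples (iops1=500, lat1=20, th1=4) and (iops2=900, lat2=40,
#             # th2=8), a tlat of 30ms interpolates the thread count first:
#             # th3 = (8 - 4) / (40 - 20) * (30 - 20) + 4 = 6, and then the
#             # IOPS: iops3 = (900 - 500) / (8 - 4) * (6 - 4) + 500 = 700.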
678#
679# hdi = DiskInfo()
680#
681# def pp(x):
682# med, conf = x.rounded_average_conf()
683# conf_perc = int(float(conf) / med * 100)
684# dev_perc = int(float(x.deviation) / med * 100)
685# return (round_3_digit(med), conf_perc, dev_perc)
686#
687# hdi.direct_iops_r_max = pp(di.direct_iops_r_max)
688#
689# if di.direct_iops_w_max is not None:
690# hdi.direct_iops_w_max = pp(di.direct_iops_w_max)
691# else:
692# hdi.direct_iops_w_max = None
693#
694# if di.direct_iops_w64_max is not None:
695# hdi.direct_iops_w64_max = pp(di.direct_iops_w64_max)
696# else:
697# hdi.direct_iops_w64_max = None
698#
699# hdi.bw_write_max = pp(di.bw_write_max)
700# hdi.bw_read_max = pp(di.bw_read_max)
701#
702# hdi.rws4k_10ms = di.rws4k_10ms if 0 != di.rws4k_10ms else None
703# hdi.rws4k_30ms = di.rws4k_30ms if 0 != di.rws4k_30ms else None
704# hdi.rws4k_100ms = di.rws4k_100ms if 0 != di.rws4k_100ms else None
705# return hdi
706#
707#
708# @report('hdd', 'hdd')
709# def make_hdd_report(processed_results, lab_info, comment):
710# plots = [
711# ('hdd_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
712# ('hdd_rwx4k', 'rand_write_4k', 'Random write 4k sync IOPS')
713# ]
714# perf_infos = [res.disk_perf_info() for res in processed_results]
715# images = make_plots(perf_infos, plots)
716# di = get_disk_info(perf_infos)
717# return render_all_html(comment, di, lab_info, images, "report_hdd.html")
718#
719#
720# @report('cinder_iscsi', 'cinder_iscsi')
721# def make_cinder_iscsi_report(processed_results, lab_info, comment):
722# plots = [
723# ('cinder_iscsi_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
724# ('cinder_iscsi_rwx4k', 'rand_write_4k', 'Random write 4k sync IOPS')
725# ]
726# perf_infos = [res.disk_perf_info() for res in processed_results]
727# try:
728# images = make_plots(perf_infos, plots)
729# except ValueError:
730# plots = [
731# ('cinder_iscsi_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
732# ('cinder_iscsi_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS')
733# ]
734# images = make_plots(perf_infos, plots)
735# di = get_disk_info(perf_infos)
736#
737# return render_all_html(comment, di, lab_info, images, "report_cinder_iscsi.html")
738#
739#
740# @report('ceph', 'ceph')
741# def make_ceph_report(processed_results, lab_info, comment):
742# plots = [
743# ('ceph_rrd4k', 'rand_read_4k', 'Random read 4k direct IOPS'),
744# ('ceph_rws4k', 'rand_write_4k', 'Random write 4k sync IOPS'),
745# ('ceph_rrd16m', 'rand_read_16m', 'Random read 16m direct MiBps'),
746# ('ceph_rwd16m', 'rand_write_16m',
747# 'Random write 16m direct MiBps'),
748# ]
749#
750# perf_infos = [res.disk_perf_info() for res in processed_results]
751# images = make_plots(perf_infos, plots)
752# di = get_disk_info(perf_infos)
753# return render_all_html(comment, di, lab_info, images, "report_ceph.html")
754#
755#
756# @report('mixed', 'mixed')
757# def make_mixed_report(processed_results, lab_info, comment):
758# #
759# # IOPS(X% read) = 100 / ( X / IOPS_W + (100 - X) / IOPS_R )
760# #
761#
762# perf_infos = [res.disk_perf_info() for res in processed_results]
763# mixed = collections.defaultdict(lambda: [])
764#
765# is_ssd = False
766# for res in perf_infos:
767# if res.name.startswith('mixed'):
768# if res.name.startswith('mixed-ssd'):
769# is_ssd = True
770# mixed[res.concurence].append((res.p.rwmixread,
771# res.lat,
772# 0,
773# # res.lat.average / 1000.0,
774# # res.lat.deviation / 1000.0,
775# res.iops.average,
776# res.iops.deviation))
777#
778# if len(mixed) == 0:
779# raise ValueError("No mixed load found")
780#
781# fig, p1 = plt.subplots()
782# p2 = p1.twinx()
783#
784# colors = ['red', 'green', 'blue', 'orange', 'magenta', "teal"]
785# colors_it = iter(colors)
786# for conc, mix_lat_iops in sorted(mixed.items()):
787# mix_lat_iops = sorted(mix_lat_iops)
788# read_perc, lat, dev, iops, iops_dev = zip(*mix_lat_iops)
789# p1.errorbar(read_perc, iops, color=next(colors_it),
790# yerr=iops_dev, label=str(conc) + " th")
791#
792# p2.errorbar(read_perc, lat, color=next(colors_it),
793# ls='--', yerr=dev, label=str(conc) + " th lat")
794#
795# if is_ssd:
796# p1.set_yscale('log')
797# p2.set_yscale('log')
798#
799# p1.set_xlim(-5, 105)
800#
801# read_perc = set(read_perc)
802# read_perc.add(0)
803# read_perc.add(100)
804# read_perc = sorted(read_perc)
805#
806# plt.xticks(read_perc, map(str, read_perc))
807#
808# p1.grid(True)
809# p1.set_xlabel("% of reads")
810# p1.set_ylabel("Mixed IOPS")
811# p2.set_ylabel("Latency, ms")
812#
813# handles1, labels1 = p1.get_legend_handles_labels()
814# handles2, labels2 = p2.get_legend_handles_labels()
815# plt.subplots_adjust(top=0.85)
816# plt.legend(handles1 + handles2, labels1 + labels2,
817# bbox_to_anchor=(0.5, 1.15),
818# loc='upper center',
819# prop={'size': 12}, ncol=3)
820# plt.show()
821#
822#
823# def make_load_report(idx, results_dir, fname):
824# dpath = os.path.join(results_dir, "io_" + str(idx))
825# files = sorted(os.listdir(dpath))
826# gf = lambda x: "_".join(x.rsplit(".", 1)[0].split('_')[:3])
827#
828# for key, group in itertools.groupby(files, gf):
829# fname = os.path.join(dpath, key + ".fio")
830#
831# cfgs = list(parse_all_in_1(open(fname).read(), fname))
832#
833# fname = os.path.join(dpath, key + "_lat.log")
834#
835# curr = []
836# arrays = []
837#
838# with open(fname) as fd:
839# for offset, lat, _, _ in csv.reader(fd):
840# offset = int(offset)
841# lat = int(lat)
842# if len(curr) > 0 and curr[-1][0] > offset:
843# arrays.append(curr)
844# curr = []
845# curr.append((offset, lat))
846# arrays.append(curr)
847# conc = int(cfgs[0].vals.get('numjobs', 1))
848#
849# if conc != 5:
850# continue
851#
852# assert len(arrays) == len(cfgs) * conc
853#
854# garrays = [[(0, 0)] for _ in range(conc)]
855#
856# for offset in range(len(cfgs)):
857# for acc, new_arr in zip(garrays, arrays[offset * conc:(offset + 1) * conc]):
858# last = acc[-1][0]
859# for off, lat in new_arr:
860# acc.append((off / 1000. + last, lat / 1000.))
861#
862# for cfg, arr in zip(cfgs, garrays):
863# plt.plot(*zip(*arr[1:]))
864# plt.show()
865# exit(1)
866#
867#
868# def make_io_report(dinfo, comment, path, lab_info=None):
869# lab_info = {
870# "total_disk": "None",
871# "total_memory": "None",
872# "nodes_count": "None",
873# "processor_count": "None"
874# }
875#
876# try:
877# res_fields = sorted(v.name for v in dinfo)
878#
879# found = False
880# for fields, name, func in report_funcs:
881# for field in fields:
882# pos = bisect.bisect_left(res_fields, field)
883#
884# if pos == len(res_fields):
885# break
886#
887# if not res_fields[pos].startswith(field):
888# break
889# else:
890# found = True
891# hpath = path.format(name)
892#
893# try:
894# report = func(dinfo, lab_info, comment)
895# except:
896# logger.exception("Diring {0} report generation".format(name))
897# continue
898#
899# if report is not None:
900# try:
901# with open(hpath, "w") as fd:
902# fd.write(report)
903# except:
904# logger.exception("Diring saving {0} report".format(name))
905# continue
906# logger.info("Report {0} saved into {1}".format(name, hpath))
907# else:
908# logger.warning("No report produced by {0!r}".format(name))
909#
910# if not found:
911# logger.warning("No report generator found for this load")
912#
913# except Exception as exc:
914# import traceback
915# traceback.print_exc()
916# logger.error("Failed to generate html report:" + str(exc))
917#
918#
919# # @classmethod
920# # def prepare_data(cls, results) -> List[Dict[str, Any]]:
921# # """create a table with io performance report for console"""
922# #
923# # def key_func(data: FioRunResult) -> Tuple[str, str, str, str, int]:
924# # tpl = data.summary_tpl()
925# # return (data.name,
926# # tpl.oper,
927# # tpl.mode,
928# # ssize2b(tpl.bsize),
929# # int(tpl.th_count) * int(tpl.vm_count))
930# # res = []
931# #
932# # for item in sorted(results, key=key_func):
933# # test_dinfo = item.disk_perf_info()
934# # testnodes_count = len(item.config.nodes)
935# #
936# # iops, _ = test_dinfo.iops.rounded_average_conf()
937# #
938# # if test_dinfo.iops_sys is not None:
939# # iops_sys, iops_sys_conf = test_dinfo.iops_sys.rounded_average_conf()
940# # _, iops_sys_dev = test_dinfo.iops_sys.rounded_average_dev()
941# # iops_sys_per_vm = round_3_digit(iops_sys / testnodes_count)
942# # iops_sys = round_3_digit(iops_sys)
943# # else:
944# # iops_sys = None
945# # iops_sys_per_vm = None
946# # iops_sys_dev = None
947# # iops_sys_conf = None
948# #
949# # bw, bw_conf = test_dinfo.bw.rounded_average_conf()
950# # _, bw_dev = test_dinfo.bw.rounded_average_dev()
951# # conf_perc = int(round(bw_conf * 100 / bw))
952# # dev_perc = int(round(bw_dev * 100 / bw))
953# #
954# # lat_50 = round_3_digit(int(test_dinfo.lat_50))
955# # lat_95 = round_3_digit(int(test_dinfo.lat_95))
956# # lat_avg = round_3_digit(int(test_dinfo.lat_avg))
957# #
958# # iops_per_vm = round_3_digit(iops / testnodes_count)
959# # bw_per_vm = round_3_digit(bw / testnodes_count)
960# #
961# # iops = round_3_digit(iops)
962# # bw = round_3_digit(bw)
963# #
964# # summ = "{0.oper}{0.mode} {0.bsize:>4} {0.th_count:>3}th {0.vm_count:>2}vm".format(item.summary_tpl())
965# #
966# # res.append({"name": key_func(item)[0],
967# # "key": key_func(item)[:4],
968# # "summ": summ,
969# # "iops": int(iops),
970# # "bw": int(bw),
971# # "conf": str(conf_perc),
972# # "dev": str(dev_perc),
973# # "iops_per_vm": int(iops_per_vm),
974# # "bw_per_vm": int(bw_per_vm),
975# # "lat_50": lat_50,
976# # "lat_95": lat_95,
977# # "lat_avg": lat_avg,
978# #
979# # "iops_sys": iops_sys,
980# # "iops_sys_per_vm": iops_sys_per_vm,
981# # "sys_conf": iops_sys_conf,
982# # "sys_dev": iops_sys_dev})
983# #
984# # return res
985# #
986# # Field = collections.namedtuple("Field", ("header", "attr", "allign", "size"))
987# # fiels_and_header = [
988# # Field("Name", "name", "l", 7),
989# # Field("Description", "summ", "l", 19),
990# # Field("IOPS\ncum", "iops", "r", 3),
991# # # Field("IOPS_sys\ncum", "iops_sys", "r", 3),
992# # Field("KiBps\ncum", "bw", "r", 6),
993# # Field("Cnf %\n95%", "conf", "r", 3),
994# # Field("Dev%", "dev", "r", 3),
995# # Field("iops\n/vm", "iops_per_vm", "r", 3),
996# # Field("KiBps\n/vm", "bw_per_vm", "r", 6),
997# # Field("lat ms\nmedian", "lat_50", "r", 3),
998# # Field("lat ms\n95%", "lat_95", "r", 3),
999# # Field("lat\navg", "lat_avg", "r", 3),
1000# # ]
1001# #
1002# # fiels_and_header_dct = dict((item.attr, item) for item in fiels_and_header)
1003# #
1004# # @classmethod
1005# # def format_for_console(cls, results) -> str:
1006# # """create a table with io performance report for console"""
1007# #
1008# # tab = texttable.Texttable(max_width=120)
1009# # tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
1010# # tab.set_cols_align([f.allign for f in cls.fiels_and_header])
1011# # sep = ["-" * f.size for f in cls.fiels_and_header]
1012# # tab.header([f.header for f in cls.fiels_and_header])
1013# # prev_k = None
1014# # for item in cls.prepare_data(results):
1015# # if prev_k is not None:
1016# # if prev_k != item["key"]:
1017# # tab.add_row(sep)
1018# #
1019# # prev_k = item["key"]
1020# # tab.add_row([item[f.attr] for f in cls.fiels_and_header])
1021# #
1022# # return tab.draw()
1023# #
1024# # @classmethod
1025# # def format_diff_for_console(cls, list_of_results: List[Any]) -> str:
1026# # """create a table with io performance report for console"""
1027# #
1028# # tab = texttable.Texttable(max_width=200)
1029# # tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
1030# #
1031# # header = [
1032# # cls.fiels_and_header_dct["name"].header,
1033# # cls.fiels_and_header_dct["summ"].header,
1034# # ]
1035# # allign = ["l", "l"]
1036# #
1037# # header.append("IOPS ~ Cnf% ~ Dev%")
1038# # allign.extend(["r"] * len(list_of_results))
1039# # header.extend(
1040# # "IOPS_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
1041# # )
1042# #
1043# # header.append("BW")
1044# # allign.extend(["r"] * len(list_of_results))
1045# # header.extend(
1046# # "BW_{0} %".format(i + 2) for i in range(len(list_of_results[1:]))
1047# # )
1048# #
1049# # header.append("LAT")
1050# # allign.extend(["r"] * len(list_of_results))
1051# # header.extend(
1052# # "LAT_{0}".format(i + 2) for i in range(len(list_of_results[1:]))
1053# # )
1054# #
1055# # tab.header(header)
1056# # sep = ["-" * 3] * len(header)
1057# # processed_results = map(cls.prepare_data, list_of_results)
1058# #
1059# # key2results = []
1060# # for res in processed_results:
1061# # key2results.append(dict(
1062# # ((item["name"], item["summ"]), item) for item in res
1063# # ))
1064# #
1065# # prev_k = None
1066# # iops_frmt = "{0[iops]} ~ {0[conf]:>2} ~ {0[dev]:>2}"
1067# # for item in processed_results[0]:
1068# # if prev_k is not None:
1069# # if prev_k != item["key"]:
1070# # tab.add_row(sep)
1071# #
1072# # prev_k = item["key"]
1073# #
1074# # key = (item['name'], item['summ'])
1075# # line = list(key)
1076# # base = key2results[0][key]
1077# #
1078# # line.append(iops_frmt.format(base))
1079# #
1080# # for test_results in key2results[1:]:
1081# # val = test_results.get(key)
1082# # if val is None:
1083# # line.append("-")
1084# # elif base['iops'] == 0:
1085# # line.append("Nan")
1086# # else:
1087# # prc_val = {'dev': val['dev'], 'conf': val['conf']}
1088# # prc_val['iops'] = int(100 * val['iops'] / base['iops'])
1089# # line.append(iops_frmt.format(prc_val))
1090# #
1091# # line.append(base['bw'])
1092# #
1093# # for test_results in key2results[1:]:
1094# # val = test_results.get(key)
1095# # if val is None:
1096# # line.append("-")
1097# # elif base['bw'] == 0:
1098# # line.append("Nan")
1099# # else:
1100# # line.append(int(100 * val['bw'] / base['bw']))
1101# #
1102# # for test_results in key2results:
1103# # val = test_results.get(key)
1104# # if val is None:
1105# # line.append("-")
1106# # else:
1107# # line.append("{0[lat_50]} - {0[lat_95]}".format(val))
1108# #
1109# # tab.add_row(line)
1110# #
1111# # tab.set_cols_align(allign)
1112# # return tab.draw()
1113#
1114#
1115# # READ_IOPS_DISCSTAT_POS = 3
1116# # WRITE_IOPS_DISCSTAT_POS = 7
1117# #
1118# #
1119# # def load_sys_log_file(ftype: str, fname: str) -> TimeSeriesValue:
1120# # assert ftype == 'iops'
1121# # pval = None
1122# # with open(fname) as fd:
1123# # iops = []
1124# # for ln in fd:
1125# # params = ln.split()
1126# # cval = int(params[WRITE_IOPS_DISCSTAT_POS]) + \
1127# # int(params[READ_IOPS_DISCSTAT_POS])
1128# # if pval is not None:
1129# # iops.append(cval - pval)
1130# # pval = cval
1131# #
1132# # vals = [(idx * 1000, val) for idx, val in enumerate(iops)]
1133# # return TimeSeriesValue(vals)
1134# #
1135# #
1136# # def load_test_results(folder: str, run_num: int) -> 'FioRunResult':
1137# # res = {}
1138# # params = None
1139# #
1140# # fn = os.path.join(folder, str(run_num) + '_params.yaml')
1141# # params = yaml.load(open(fn).read())
1142# #
1143# # conn_ids_set = set()
1144# # rr = r"{}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.\d+\.log$".format(run_num)
1145# # for fname in os.listdir(folder):
1146# # rm = re.match(rr, fname)
1147# # if rm is None:
1148# # continue
1149# #
1150# # conn_id_s = rm.group('conn_id')
1151# # conn_id = conn_id_s.replace('_', ':')
1152# # ftype = rm.group('type')
1153# #
1154# # if ftype not in ('iops', 'bw', 'lat'):
1155# # continue
1156# #
1157# # ts = load_fio_log_file(os.path.join(folder, fname))
1158# # res.setdefault(ftype, {}).setdefault(conn_id, []).append(ts)
1159# #
1160# # conn_ids_set.add(conn_id)
1161# #
1162# # rr = r"{}_(?P<conn_id>.*?)_(?P<type>[^_.]*)\.sys\.log$".format(run_num)
1163# # for fname in os.listdir(folder):
1164# # rm = re.match(rr, fname)
1165# # if rm is None:
1166# # continue
1167# #
1168# # conn_id_s = rm.group('conn_id')
1169# # conn_id = conn_id_s.replace('_', ':')
1170# # ftype = rm.group('type')
1171# #
1172# # if ftype not in ('iops', 'bw', 'lat'):
1173# # continue
1174# #
1175# # ts = load_sys_log_file(ftype, os.path.join(folder, fname))
1176# # res.setdefault(ftype + ":sys", {}).setdefault(conn_id, []).append(ts)
1177# #
1178# # conn_ids_set.add(conn_id)
1179# #
1180# # mm_res = {}
1181# #
1182# # if len(res) == 0:
1183# # raise ValueError("No data was found")
1184# #
1185# # for key, data in res.items():
1186# # conn_ids = sorted(conn_ids_set)
1187# # awail_ids = [conn_id for conn_id in conn_ids if conn_id in data]
1188# # matr = [data[conn_id] for conn_id in awail_ids]
1189# # mm_res[key] = MeasurementMatrix(matr, awail_ids)
1190# #
1191# # raw_res = {}
1192# # for conn_id in conn_ids:
1193# # fn = os.path.join(folder, "{0}_{1}_rawres.json".format(run_num, conn_id_s))
1194# #
1195# # # remove message hack
1196# # fc = "{" + open(fn).read().split('{', 1)[1]
1197# # raw_res[conn_id] = json.loads(fc)
1198# #
1199# # fio_task = FioJobSection(params['name'])
1200# # fio_task.vals.update(params['vals'])
1201# #
1202# # config = TestConfig('io', params, None, params['nodes'], folder, None)
1203# # return FioRunResult(config, fio_task, mm_res, raw_res, params['intervals'], run_num)
1204# #
1205#
# # class DiskPerfInfo:
# #     def __init__(self, name: str, summary: str, params: Dict[str, Any], testnodes_count: int) -> None:
# #         self.name = name
# #         self.bw = None
# #         self.iops = None
# #         self.lat = None
# #         self.lat_50 = None
# #         self.lat_95 = None
# #         self.lat_avg = None
# #
# #         self.raw_bw = []
# #         self.raw_iops = []
# #         self.raw_lat = []
# #
# #         self.params = params
# #         self.testnodes_count = testnodes_count
# #         self.summary = summary
# #
# #         self.sync_mode = get_test_sync_mode(self.params['vals'])
# #         self.concurence = self.params['vals'].get('numjobs', 1)
# #
# #
# # class IOTestResults:
# #     def __init__(self, suite_name: str, fio_results: 'FioRunResult', log_directory: str):
# #         self.suite_name = suite_name
# #         self.fio_results = fio_results
# #         self.log_directory = log_directory
# #
# #     def __iter__(self):
# #         return iter(self.fio_results)
# #
# #     def __len__(self):
# #         return len(self.fio_results)
# #
# #     def get_yamable(self) -> Dict[str, List[str]]:
# #         items = [(fio_res.summary(), fio_res.idx) for fio_res in self]
# #         return {self.suite_name: [self.log_directory] + items}
#
#
# # class FioRunResult(TestResults):
# #     """
# #     Fio run results
# #     config: TestConfig
# #     fio_task: FioJobSection
# #     ts_results: {str: MeasurementMatrix[TimeSeriesValue]}
# #     raw_result: ????
# #     run_interval: (float, float) - test run time, used for sensors
# #     """
# #     def __init__(self, config, fio_task, ts_results, raw_result, run_interval, idx):
# #
# #         self.name = fio_task.name.rsplit("_", 1)[0]
# #         self.fio_task = fio_task
# #         self.idx = idx
# #
# #         self.bw = ts_results['bw']
# #         self.lat = ts_results['lat']
# #         self.iops = ts_results['iops']
# #
# #         if 'iops:sys' in ts_results:
# #             self.iops_sys = ts_results['iops:sys']
# #         else:
# #             self.iops_sys = None
# #
# #         res = {"bw": self.bw,
# #                "lat": self.lat,
# #                "iops": self.iops,
# #                "iops:sys": self.iops_sys}
# #
# #         self.sensors_data = None
# #         self._pinfo = None
# #         TestResults.__init__(self, config, res, raw_result, run_interval)
# #
# #     def get_params_from_fio_report(self):
# #         nodes = self.bw.connections_ids
# #
# #         iops = [self.raw_result[node]['jobs'][0]['mixed']['iops'] for node in nodes]
# #         total_ios = [self.raw_result[node]['jobs'][0]['mixed']['total_ios'] for node in nodes]
# #         runtime = [self.raw_result[node]['jobs'][0]['mixed']['runtime'] / 1000 for node in nodes]
# #         flt_iops = [float(ios) / rtime for ios, rtime in zip(total_ios, runtime)]
# #
# #         bw = [self.raw_result[node]['jobs'][0]['mixed']['bw'] for node in nodes]
# #         total_bytes = [self.raw_result[node]['jobs'][0]['mixed']['io_bytes'] for node in nodes]
# #         flt_bw = [float(tbytes) / rtime for tbytes, rtime in zip(total_bytes, runtime)]
# #
# #         return {'iops': iops,
# #                 'flt_iops': flt_iops,
# #                 'bw': bw,
# #                 'flt_bw': flt_bw}
# #
# #     def summary(self):
# #         return get_test_summary(self.fio_task, len(self.config.nodes))
# #
# #     def summary_tpl(self):
# #         return get_test_summary_tuple(self.fio_task, len(self.config.nodes))
# #
# #     def get_lat_perc_50_95_multy(self):
# #         lat_mks = collections.defaultdict(lambda: 0)
# #         num_res = 0
# #
# #         for result in self.raw_result.values():
# #             num_res += len(result['jobs'])
# #             for job_info in result['jobs']:
# #                 for k, v in job_info['latency_ms'].items():
# #                     if isinstance(k, str) and k.startswith('>='):
# #                         lat_mks[int(k[2:]) * 1000] += v
# #                     else:
# #                         lat_mks[int(k) * 1000] += v
# #
# #                 for k, v in job_info['latency_us'].items():
# #                     lat_mks[int(k)] += v
# #
# #         for k, v in lat_mks.items():
# #             lat_mks[k] = float(v) / num_res
# #         return get_lat_perc_50_95(lat_mks)
# #
# #     def disk_perf_info(self, avg_interval=2.0):
# #
# #         if self._pinfo is not None:
# #             return self._pinfo
# #
# #         testnodes_count = len(self.config.nodes)
# #
# #         pinfo = DiskPerfInfo(self.name,
# #                              self.summary(),
# #                              self.params,
# #                              testnodes_count)
# #
# #         def prepare(data, drop=1):
# #             if data is None:
# #                 return data
# #
# #             res = []
# #             for ts_data in data:
# #                 if ts_data.average_interval() < avg_interval:
# #                     ts_data = ts_data.derived(avg_interval)
# #
# #                 # drop the last values on the bounds,
# #                 # as they may contain ranges without activity
# #                 assert len(ts_data.values) >= drop + 1, str(drop) + " " + str(ts_data.values)
# #
# #                 if drop > 0:
# #                     res.append(ts_data.values[:-drop])
# #                 else:
# #                     res.append(ts_data.values)
# #
# #             return res
# #
# #         def agg_data(matr):
# #             arr = sum(matr, [])
# #             min_len = min(map(len, arr))
# #             res = []
# #             for idx in range(min_len):
# #                 res.append(sum(dt[idx] for dt in arr))
# #             return res
# #
# #         pinfo.raw_lat = list(map(prepare, self.lat.per_vm()))
# #         num_th = sum(map(len, pinfo.raw_lat))
# #         lat_avg = [val / num_th for val in agg_data(pinfo.raw_lat)]
# #         pinfo.lat_avg = data_property(lat_avg).average / 1000  # us to ms
# #
# #         pinfo.lat_50, pinfo.lat_95 = self.get_lat_perc_50_95_multy()
# #         pinfo.lat = pinfo.lat_50
# #
# #         pinfo.raw_bw = list(map(prepare, self.bw.per_vm()))
# #         pinfo.raw_iops = list(map(prepare, self.iops.per_vm()))
# #
# #         if self.iops_sys is not None:
# #             pinfo.raw_iops_sys = list(map(prepare, self.iops_sys.per_vm()))
# #             pinfo.iops_sys = data_property(agg_data(pinfo.raw_iops_sys))
# #         else:
# #             pinfo.raw_iops_sys = None
# #             pinfo.iops_sys = None
# #
# #         fparams = self.get_params_from_fio_report()
# #         fio_report_bw = sum(fparams['flt_bw'])
# #         fio_report_iops = sum(fparams['flt_iops'])
# #
# #         agg_bw = agg_data(pinfo.raw_bw)
# #         agg_iops = agg_data(pinfo.raw_iops)
# #
# #         log_bw_avg = average(agg_bw)
# #         log_iops_avg = average(agg_iops)
# #
# #         # update values to match the averages from the fio report
# #         coef_iops = fio_report_iops / float(log_iops_avg)
# #         coef_bw = fio_report_bw / float(log_bw_avg)
# #
# #         bw_log = data_property([val * coef_bw for val in agg_bw])
# #         iops_log = data_property([val * coef_iops for val in agg_iops])
# #
# #         bw_report = data_property([fio_report_bw])
# #         iops_report = data_property([fio_report_iops])
# #
# #         # when IOPS/BW per thread is too low,
# #         # data from the logs is rounded to match
# #         iops_per_th = sum(sum(pinfo.raw_iops, []), [])
# #         if average(iops_per_th) > 10:
# #             pinfo.iops = iops_log
# #             pinfo.iops2 = iops_report
# #         else:
# #             pinfo.iops = iops_report
# #             pinfo.iops2 = iops_log
# #
# #         bw_per_th = sum(sum(pinfo.raw_bw, []), [])
# #         if average(bw_per_th) > 10:
# #             pinfo.bw = bw_log
# #             pinfo.bw2 = bw_report
# #         else:
# #             pinfo.bw = bw_report
# #             pinfo.bw2 = bw_log
# #
# #         self._pinfo = pinfo
# #
# #         return pinfo
#
# # class TestResult:
# #     """Hold all information for a given test - test info,
# #     sensors data and performance results for the test period from all nodes"""
# #     run_id = None  # type: int
# #     test_info = None  # type: Any
# #     begin_time = None  # type: int
# #     end_time = None  # type: int
# #     sensors = None  # Dict[Tuple[str, str, str], TimeSeries]
# #     performance = None  # Dict[Tuple[str, str], TimeSeries]
# #
# # class TestResults:
# #     """
# #     this class describes test results
# #
# #     config: TestConfig - test config object
# #     params: dict - parameters from the yaml file for this test
# #     results: {str: MeasurementMesh} - test results object
# #     raw_result: Any - opaque object to store raw results
# #     run_interval: (float, float) - test run time, used for sensors
# #     """
# #
# #     def __init__(self,
# #                  config: TestConfig,
# #                  results: Dict[str, Any],
# #                  raw_result: Any,
# #                  run_interval: Tuple[float, float]) -> None:
# #         self.config = config
# #         self.params = config.params
# #         self.results = results
# #         self.raw_result = raw_result
# #         self.run_interval = run_interval
# #
# #     def __str__(self) -> str:
# #         res = "{0}({1}):\n results:\n".format(
# #             self.__class__.__name__,
# #             self.summary())
# #
# #         for name, val in self.results.items():
# #             res += " {0}={1}\n".format(name, val)
# #
# #         res += " params:\n"
# #
# #         for name, val in self.params.items():
# #             res += " {0}={1}\n".format(name, val)
# #
# #         return res
# #
# #     def summary(self) -> str:
# #         raise NotImplementedError()
# #         return ""
# #
# #     def get_yamable(self) -> Any:
# #         raise NotImplementedError()
# #         return None
#
#
#
# # class MeasurementMatrix:
# #     """
# #     data: [[MeasurementResult]] - VM_COUNT x TH_COUNT matrix of MeasurementResult
# #     """
# #     def __init__(self, data, connections_ids):
# #         self.data = data
# #         self.connections_ids = connections_ids
# #
# #     def per_vm(self):
# #         return self.data
# #
# #     def per_th(self):
# #         return sum(self.data, [])
#
#
# # class MeasurementResults:
# #     data = None  # type: List[Any]
# #
# #     def stat(self) -> StatProps:
# #         return data_property(self.data)
# #
# #     def __str__(self) -> str:
# #         return 'TS([' + ", ".join(map(str, self.data)) + '])'
# #
# #
# # class SimpleVals(MeasurementResults):
# #     """
# #     data: [float] - list of values
# #     """
# #     def __init__(self, data: List[float]) -> None:
# #         self.data = data
# #
# #
# # class TimeSeriesValue(MeasurementResults):
# #     """
# #     data: [(float, float, float)] - list of (start_time, length, average_value_for_interval)
# #     odata: original values
# #     """
# #     def __init__(self, data: List[Tuple[float, float]]) -> None:
# #         assert len(data) > 0
# #         self.odata = data[:]
# #         self.data = []  # type: List[Tuple[float, float, float]]
# #
# #         cstart = 0.0
# #         for nstart, nval in data:
# #             self.data.append((cstart, nstart - cstart, nval))
# #             cstart = nstart
# #
# #     @property
# #     def values(self) -> List[float]:
# #         return [val[2] for val in self.data]
# #
# #     def average_interval(self) -> float:
# #         return float(sum([val[1] for val in self.data])) / len(self.data)
# #
# #     def skip(self, seconds) -> 'TimeSeriesValue':
# #         nres = []
# #         for start, ln, val in self.data:
# #             nstart = start + ln - seconds
# #             if nstart > 0:
# #                 nres.append([nstart, val])
# #         return self.__class__(nres)
# #
# #     def derived(self, tdelta) -> 'TimeSeriesValue':
# #         end = self.data[-1][0] + self.data[-1][1]
# #         tdelta = float(tdelta)
# #
# #         ln = end / tdelta
# #
# #         if ln - int(ln) > 0:
# #             ln += 1
# #
# #         res = [[tdelta * i, 0.0] for i in range(int(ln))]
# #
# #         for start, length, val in self.data:
# #             start_idx = int(start / tdelta)
# #             end_idx = int((start + length) / tdelta)
# #
# #             for idx in range(start_idx, end_idx + 1):
# #                 rstart = tdelta * idx
# #                 rend = tdelta * (idx + 1)
# #
# #                 intersection_ln = min(rend, start + length) - max(start, rstart)
# #                 if intersection_ln > 0:
# #                     try:
# #                         res[idx][1] += val * intersection_ln / tdelta
# #                     except IndexError:
# #                         raise
# #
# #         return self.__class__(res)
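# #
# # # Example of the constructor transform (hypothetical samples):
# # # TimeSeriesValue([(1.0, 10), (3.0, 20)]) turns the (end_time, value)
# # # input points into self.data == [(0.0, 1.0, 10), (1.0, 2.0, 20)], i.e.
# # # (start_time, length, average_value_for_interval) tuples.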
#
#
# def console_report_stage(ctx: TestRun) -> None:
#     # TODO(koder): load data from storage
#     raise NotImplementedError("...")
#     # first_report = True
#     # text_rep_fname = ctx.config.text_report_file
#     #
#     # with open(text_rep_fname, "w") as fd:
#     #     for tp, data in ctx.results.items():
#     #         if 'io' == tp and data is not None:
#     #             rep_lst = []
#     #             for result in data:
#     #                 rep_lst.append(
#     #                     IOPerfTest.format_for_console(list(result)))
#     #             rep = "\n\n".join(rep_lst)
#     #         elif tp in ['mysql', 'pgbench'] and data is not None:
#     #             rep = MysqlTest.format_for_console(data)
#     #         elif tp == 'omg':
#     #             rep = OmgTest.format_for_console(data)
#     #         else:
#     #             logger.warning("Can't generate text report for " + tp)
#     #             continue
#     #
#     #         fd.write(rep)
#     #         fd.write("\n")
#     #
#     #         if first_report:
#     #             logger.info("Text report was stored in " + text_rep_fname)
#     #             first_report = False
#     #
#     #         print("\n" + rep + "\n")
#
1601#
1602# # def test_load_report_stage(cfg: Config, ctx: TestRun) -> None:
1603# # load_rep_fname = cfg.load_report_file
1604# # found = False
1605# # for idx, (tp, data) in enumerate(ctx.results.items()):
1606# # if 'io' == tp and data is not None:
1607# # if found:
1608# # logger.error("Making reports for more than one " +
1609# # "io block isn't supported! All " +
1610# # "report, except first are skipped")
1611# # continue
1612# # found = True
1613# # report.make_load_report(idx, cfg['results'], load_rep_fname)
1614# #
1615# #
1616#
1617# # def html_report_stage(ctx: TestRun) -> None:
1618# # TODO(koder): load data from storage
1619# # raise NotImplementedError("...")
1620# # html_rep_fname = cfg.html_report_file
1621# # found = False
1622# # for tp, data in ctx.results.items():
1623# # if 'io' == tp and data is not None:
1624# # if found or len(data) > 1:
1625# # logger.error("Making reports for more than one " +
1626# # "io block isn't supported! All " +
1627# # "report, except first are skipped")
1628# # continue
1629# # found = True
1630# # report.make_io_report(list(data[0]),
1631# # cfg.get('comment', ''),
1632# # html_rep_fname,
1633# # lab_info=ctx.nodes)
1634#
1635# #
1636# # def load_data_from_path(test_res_dir: str) -> Mapping[str, List[Any]]:
1637# # files = get_test_files(test_res_dir)
1638# # raw_res = yaml_load(open(files['raw_results']).read())
1639# # res = collections.defaultdict(list)
1640# #
1641# # for tp, test_lists in raw_res:
1642# # for tests in test_lists:
1643# # for suite_name, suite_data in tests.items():
1644# # result_folder = suite_data[0]
1645# # res[tp].append(TOOL_TYPE_MAPPER[tp].load(suite_name, result_folder))
1646# #
1647# # return res
1648# #
1649# #
1650# # def load_data_from_path_stage(var_dir: str, _, ctx: TestRun) -> None:
1651# # for tp, vals in load_data_from_path(var_dir).items():
1652# # ctx.results.setdefault(tp, []).extend(vals)
1653# #
1654# #
1655# # def load_data_from(var_dir: str) -> Callable[[TestRun], None]:
1656# # return functools.partial(load_data_from_path_stage, var_dir)