blob: f42c7c9855b2e2fb3afd749079f58e3083f968ee [file] [log] [blame]
koder aka kdanilov108ac362017-01-19 20:17:16 +02001import os
koder aka kdanilov7f59d562016-12-26 01:34:23 +02002import abc
koder aka kdanilova047e1b2015-04-21 23:16:59 +03003import logging
kdanylov aka koder84de1e42017-05-22 14:00:07 +03004import collections
koder aka kdanilov108ac362017-01-19 20:17:16 +02005from collections import defaultdict
kdanylov aka koder84de1e42017-05-22 14:00:07 +03006from typing import Dict, Any, Iterator, Tuple, cast, List, Set, Optional, Union, Type, Iterable
koder aka kdanilovcff7b2e2015-04-18 20:48:15 +03007
koder aka kdanilovffaf48d2016-12-27 02:25:29 +02008import numpy
kdanylov aka koder84de1e42017-05-22 14:00:07 +03009import scipy.stats
kdanylov aka koder736e5c12017-05-07 17:27:14 +030010from statsmodels.tsa.stattools import adfuller
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +030011
kdanylov aka koder736e5c12017-05-07 17:27:14 +030012import xmlbuilder3
koder aka kdanilovbe8f89f2015-04-28 14:51:51 +030013
koder aka kdanilov108ac362017-01-19 20:17:16 +020014import wally
koder aka kdanilovffaf48d2016-12-27 02:25:29 +020015
kdanylov aka koder84de1e42017-05-22 14:00:07 +030016# import matplotlib
17# matplotlib.use('GTKAgg')
18
kdanylov aka koderb0833332017-05-13 20:39:17 +030019from cephlib import html
20from cephlib.units import b2ssize, b2ssize_10, unit_conversion_coef, unit_conversion_coef_f
21from cephlib.statistic import calc_norm_stat_props
kdanylov aka koder84de1e42017-05-22 14:00:07 +030022from cephlib.storage_selectors import sum_sensors, find_sensors_to_2d, update_storage_selector, DevRoles
kdanylov aka koderb0833332017-05-13 20:39:17 +030023from cephlib.wally_storage import find_nodes_by_roles
kdanylov aka koder026e5f22017-05-15 01:04:39 +030024from cephlib.plot import (plot_simple_bars, plot_hmap_from_2d, plot_lat_over_time, plot_simple_over_time,
kdanylov aka koder84de1e42017-05-22 14:00:07 +030025 plot_histo_heatmap, plot_v_over_time, plot_hist, plot_dots_with_regression)
26from cephlib.numeric_types import ndarray2d
27from cephlib.node import NodeRole
kdanylov aka koderb0833332017-05-13 20:39:17 +030028
29from .utils import STORAGE_ROLES
koder aka kdanilov39e449e2016-12-17 15:15:26 +020030from .stage import Stage, StepOrder
31from .test_run_class import TestRun
kdanylov aka koder026e5f22017-05-15 01:04:39 +030032from .result_classes import IWallyStorage
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +030033from .result_classes import DataSource, TimeSeries, SuiteConfig
koder aka kdanilov108ac362017-01-19 20:17:16 +020034from .suits.io.fio import FioTest, FioJobConfig
koder aka kdanilova732a602017-02-01 20:29:56 +020035from .suits.io.fio_job import FioJobParams
36from .suits.job import JobConfig
kdanylov aka koderb0833332017-05-13 20:39:17 +030037from .data_selectors import get_aggregated, AGG_TAG
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +030038from .report_profiles import (DefStyleProfile, DefColorProfile, StyleProfile, ColorProfile,
39 default_format, io_chart_format)
kdanylov aka koder026e5f22017-05-15 01:04:39 +030040from .plot import io_chart
kdanylov aka koder84de1e42017-05-22 14:00:07 +030041from .resources import ResourceNames, get_resources_usage, make_iosum, get_cluster_cpu_load
42from .console_report import get_console_report_table, console_report_headers, console_report_align, Texttable
kdanylov aka koder026e5f22017-05-15 01:04:39 +030043
44
koder aka kdanilov962ee5f2016-12-19 02:40:08 +020045logger = logging.getLogger("wally")
koder aka kdanilova047e1b2015-04-21 23:16:59 +030046
47
koder aka kdanilov108ac362017-01-19 20:17:16 +020048# ---------------- CONSTS ---------------------------------------------------------------------------------------------
koder aka kdanilov39e449e2016-12-17 15:15:26 +020049
koder aka kdanilov7f59d562016-12-26 01:34:23 +020050
# Debug switch for report generation; its consumers are not visible in this
# part of the file -- presumably enables extra diagnostics when True (TODO confirm).
DEBUG = False
koder aka kdanilov39e449e2016-12-17 15:15:26 +020052
koder aka kdanilov39e449e2016-12-17 15:15:26 +020053
koder aka kdanilov108ac362017-01-19 20:17:16 +020054# -------------- AGGREGATION AND STAT FUNCTIONS ----------------------------------------------------------------------
koder aka kdanilov108ac362017-01-19 20:17:16 +020055
# (sensor, metric) pairs whose samples represent an instantaneous level
# (e.g. queue depth) rather than a per-interval delta.
LEVEL_SENSORS = {
    ("block-io", "io_queue"),
    ("system-cpu", "procs_blocked"),
    ("system-cpu", "procs_queue"),
}


def is_level_sensor(sensor: str, metric: str) -> bool:
    """Return True when the sensor reports a level (e.g. queue depth)."""
    key = (sensor, metric)
    return key in LEVEL_SENSORS


def is_delta_sensor(sensor: str, metric: str) -> bool:
    """Return True when the sensor reports per-interval deltas of a cumulative value."""
    return (sensor, metric) not in LEVEL_SENSORS
67
kdanylov aka koder736e5c12017-05-07 17:27:14 +030068
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +030069# def get_idle_load(rstorage: ResultStorage, *args, **kwargs) -> float:
70# if 'idle' not in rstorage.storage:
71# return 0.0
72# idle_time = rstorage.storage.get('idle')
73# ssum = summ_sensors(rstorage, time_range=idle_time, *args, **kwargs)
74# return numpy.average(ssum)
kdanylov aka koder736e5c12017-05-07 17:27:14 +030075
koder aka kdanilov108ac362017-01-19 20:17:16 +020076
77# -------------------- REPORT HELPERS --------------------------------------------------------------------------------
78
79
class HTMLBlock:
    """A chunk of rendered HTML plus optional assets, ordered by 'order_attr'."""
    data = None  # type: str
    js_links = []  # type: List[str]
    css_links = []  # type: List[str]
    order_attr = None  # type: Any

    def __init__(self, data: str, order_attr: Any = None) -> None:
        self.data = data
        self.order_attr = order_attr

    def __eq__(self, other: Any) -> bool:
        # blocks are considered equal when their ordering keys match
        return other.order_attr == self.order_attr  # type: ignore

    def __lt__(self, other: Any) -> bool:
        # self sorts before other when other's key is greater
        return other.order_attr > self.order_attr  # type: ignore
95
96
97class Table:
98 def __init__(self, header: List[str]) -> None:
99 self.header = header
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300100 self.data = [] # type: List[List[str]]
koder aka kdanilova732a602017-02-01 20:29:56 +0200101
102 def add_line(self, values: List[str]) -> None:
103 self.data.append(values)
104
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300105 def html(self) -> str:
koder aka kdanilova732a602017-02-01 20:29:56 +0200106 return html.table("", self.header, self.data)
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200107
108
class Menu1st:
    """Labels for the first-level (top) menu of the generated HTML report."""
    summary = "Summary"
    per_job = "Per Job"
    engineering = "Engineering"
    engineering_per_job = "Engineering per job"
    # NOTE(review): module-level menu_1st_order lists these in a different order
    # (engineering before per_job) -- confirm which list the renderer consumes
    order = [summary, per_job, engineering, engineering_per_job]
koder aka kdanilov108ac362017-01-19 20:17:16 +0200115
116
class Menu2ndEng:
    """Second-level menu labels for the 'Engineering' report section."""
    summary = "Summary"
    iops_time = "IOPS(time)"
    hist = "IOPS/lat overall histogram"
    lat_time = "Lat(time)"
    resource_regression = "Resource usage LR"
    # display order of the engineering subsections
    order = [summary, iops_time, hist, lat_time, resource_regression]
koder aka kdanilov108ac362017-01-19 20:17:16 +0200124
125
class Menu2ndSumm:
    """Second-level menu labels for the 'Summary' report section."""
    summary = "Summary"
    io_lat_qd = "IO & Lat vs QD"
    resources_usage_qd = "Resource usage"
    # display order of the summary subsections
    order = [summary, io_lat_qd, resources_usage_qd]
koder aka kdanilov108ac362017-01-19 20:17:16 +0200131
132
# Order in which the first-level menu sections appear in the generated report.
# NOTE(review): differs from Menu1st.order (engineering before per_job) --
# confirm which of the two lists the HTML renderer actually uses.
menu_1st_order = [Menu1st.summary, Menu1st.engineering, Menu1st.per_job, Menu1st.engineering_per_job]
koder aka kdanilov108ac362017-01-19 20:17:16 +0200134
135
136# -------------------- REPORTS --------------------------------------------------------------------------------------
137
class ReporterBase:
    """Shared plumbing for all reporters: result storage plus style/color profiles."""

    def __init__(self, rstorage: IWallyStorage, style: StyleProfile, colors: ColorProfile) -> None:
        self.rstorage = rstorage
        self.style = style
        self.colors = colors

    def plt(self, func, ds: DataSource, *args, **kwargs) -> str:
        """Invoke a plot function, prefixing the common (storage, style, colors) arguments."""
        return func(self.rstorage, self.style, self.colors, ds, *args, **kwargs)
146
147
class SuiteReporter(ReporterBase, metaclass=abc.ABCMeta):
    """Abstract base for reporters which render HTML sections for a whole suite."""

    # suite type names (e.g. 'fio') this reporter can handle
    suite_types = set()  # type: Set[str]

    @abc.abstractmethod
    def get_divs(self, suite: SuiteConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        """Yield (first-level menu, second-level menu, html block) tuples."""
154
155
class JobReporter(ReporterBase, metaclass=abc.ABCMeta):
    """Abstract base for reporters which render HTML sections for a single job."""

    # Suite type names (e.g. 'fio') this reporter can handle.
    # FIX: sibling SuiteReporter and every subclass in this file use the plural
    # name 'suite_types'; provide it here for consistency and keep the original
    # misspelled 'suite_type' binding as a backward-compatible alias.
    suite_types = set()  # type: Set[str]
    suite_type = suite_types

    @abc.abstractmethod
    def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        """Yield (first-level menu, second-level menu, html block) tuples."""
        pass
162
163
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300164# # Linearization report
165# class IOPSBsize(SuiteReporter):
166# """Creates graphs, which show how IOPS and Latency depend on block size"""
167#
168#
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300169
170
class StoragePerfSummary:
    """Best observed IOPS/bandwidth figures under several latency caps."""

    iops_units = "KiBps"
    bw_units = "Bps"
    NO_VAL = -1  # sentinel: the metric was not measured

    def __init__(self) -> None:
        # Best random write ('rw_*') / random read ('rr_*') IOPS whose latency
        # percentile stayed under the 10/30/100 ms cap, plus peak large-block
        # bandwidth in either direction.  All start at the NO_VAL sentinel.
        for field in ('rw_iops_10ms', 'rw_iops_30ms', 'rw_iops_100ms',
                      'rr_iops_10ms', 'rr_iops_30ms', 'rr_iops_100ms',
                      'bw_write_max', 'bw_read_max'):
            setattr(self, field, self.NO_VAL)

        self.bw = None  # type: Optional[float]
        self.read_iops = None  # type: Optional[float]
        self.write_iops = None  # type: Optional[float]
191
192
def get_performance_summary(storage: IWallyStorage, suite: SuiteConfig,
                            hboxes: int, large_blocks: int) -> Tuple[StoragePerfSummary, StoragePerfSummary]:
    """Scan every fio job of the suite and collect best-case IOPS/BW figures.

    Small-block sync jobs contribute IOPS values bucketed by latency caps;
    large-block sync jobs contribute peak read/write bandwidth.
    Returns (median-based summary, 95th-percentile-based summary).
    """
    psum95 = StoragePerfSummary()
    psum50 = StoragePerfSummary()

    for job in storage.iter_job(suite):
        if not isinstance(job, FioJobConfig):
            continue

        fjob = cast(FioJobConfig, job)
        io_sum = make_iosum(storage, suite, job, hboxes)

        bw_avg = io_sum.bw.average * unit_conversion_coef(io_sum.bw.units, StoragePerfSummary.bw_units)

        if fjob.bsize < large_blocks:
            lat_95_ms = io_sum.lat.perc_95 * unit_conversion_coef(io_sum.lat.units, 'ms')
            lat_50_ms = io_sum.lat.perc_50 * unit_conversion_coef(io_sum.lat.units, 'ms')

            # bandwidth in KiBps divided by block size in KiB gives IOPS
            iops_avg = io_sum.bw.average * unit_conversion_coef(io_sum.bw.units, StoragePerfSummary.iops_units)
            iops_avg /= fjob.bsize

            # only direct-sync random IO counts toward the latency-capped buckets
            if fjob.sync_mode == 'd' and fjob.oper in ('randwrite', 'randread'):
                prefix = 'rw' if fjob.oper == 'randwrite' else 'rr'
                for lat_limit in (10, 30, 100):
                    field = '{}_iops_{}ms'.format(prefix, lat_limit)
                    if lat_95_ms <= lat_limit:
                        setattr(psum95, field, max(getattr(psum95, field), iops_avg))
                    if lat_50_ms <= lat_limit:
                        setattr(psum50, field, max(getattr(psum50, field), iops_avg))
        elif fjob.sync_mode == 'd':
            # large blocks: track peak sequential/random bandwidth per direction
            if fjob.oper in ('randwrite', 'write'):
                psum50.bw_write_max = max(psum50.bw_write_max, bw_avg)
            elif fjob.oper in ('randread', 'read'):
                psum50.bw_read_max = max(psum50.bw_read_max, bw_avg)

    return psum50, psum95
233
234
235# Main performance report
class PerformanceSummary(SuiteReporter):
    """Aggregated summary for storage: one table with best-of IOPS/BW numbers."""

    def get_divs(self, suite: SuiteConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        psum50, psum95 = get_performance_summary(self.rstorage, suite, self.style.hist_boxes, self.style.large_blocks)

        caption = "Storage summary report"
        res = html.H3(html.center(caption))

        headers = ["Mode", "Stats", "Explanation"]
        align = ['left', 'right', "left"]
        # plain strings act as section headers, 3-tuples are data rows
        data = []

        if psum95.rr_iops_10ms != psum95.NO_VAL or psum95.rr_iops_30ms != psum95.NO_VAL or \
                psum95.rr_iops_100ms != psum95.NO_VAL:
            data.append("Average random read IOPS for small blocks")

        if psum95.rr_iops_10ms != psum95.NO_VAL:
            data.append(("Database", b2ssize_10(psum95.rr_iops_10ms), "Latency 95th percentile < 10ms"))
        if psum95.rr_iops_30ms != psum95.NO_VAL:
            data.append(("File system", b2ssize_10(psum95.rr_iops_30ms), "Latency 95th percentile < 30ms"))
        if psum95.rr_iops_100ms != psum95.NO_VAL:
            data.append(("File server", b2ssize_10(psum95.rr_iops_100ms), "Latency 95th percentile < 100ms"))

        if psum95.rw_iops_10ms != psum95.NO_VAL or psum95.rw_iops_30ms != psum95.NO_VAL or \
                psum95.rw_iops_100ms != psum95.NO_VAL:
            data.append("Average random write IOPS for small blocks")

        if psum95.rw_iops_10ms != psum95.NO_VAL:
            data.append(("Database", b2ssize_10(psum95.rw_iops_10ms), "Latency 95th percentile < 10ms"))
        if psum95.rw_iops_30ms != psum95.NO_VAL:
            data.append(("File system", b2ssize_10(psum95.rw_iops_30ms), "Latency 95th percentile < 30ms"))
        if psum95.rw_iops_100ms != psum95.NO_VAL:
            data.append(("File server", b2ssize_10(psum95.rw_iops_100ms), "Latency 95th percentile < 100ms"))

        # FIX: caption typo "sequention" -> "sequential"
        if psum50.bw_write_max != psum50.NO_VAL or psum50.bw_read_max != psum50.NO_VAL:
            data.append("Average sequential IO")

        # FIX: these two checks compared psum50 values against psum95.NO_VAL;
        # the constant is the same (class attribute), but the wrong object made
        # the intent misleading and fragile
        if psum50.bw_write_max != psum50.NO_VAL:
            data.append(("Write", b2ssize(psum50.bw_write_max) + psum50.bw_units,
                         "Large blocks (>={}KiB)".format(self.style.large_blocks)))
        if psum50.bw_read_max != psum50.NO_VAL:
            data.append(("Read", b2ssize(psum50.bw_read_max) + psum50.bw_units,
                         "Large blocks (>={}KiB)".format(self.style.large_blocks)))

        res += html.center(html.table("Performance", headers, data, align=align))
        yield Menu1st.summary, Menu2ndSumm.summary, HTMLBlock(res)
282
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300283
284# # Node load over test time
285# class NodeLoad(SuiteReporter):
286# """IOPS/latency during test"""
287
288# # Ceph operation breakout report
289# class CephClusterSummary(SuiteReporter):
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200290
291
292# Main performance report
class IOQD(SuiteReporter):
    """Creates graph, which show how IOPS and Latency depend on QD"""
    suite_types = {'fio'}

    def get_divs(self, suite: SuiteConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        ts_map = defaultdict(list)  # type: Dict[FioJobParams, List[Tuple[SuiteConfig, FioJobConfig]]]
        str_summary = {}  # type: Dict[FioJobParams, Tuple[str, str]]

        # group jobs which differ only in queue depth
        for job in self.rstorage.iter_job(suite):
            fjob = cast(FioJobConfig, job)
            fjob_no_qd = cast(FioJobParams, fjob.params.copy(qd=None))
            str_summary[fjob_no_qd] = (fjob_no_qd.summary, fjob_no_qd.long_summary)
            ts_map[fjob_no_qd].append((suite, fjob))

        # FIX: caption typo "bandwith" -> "bandwidth"
        caption = "IOPS, bandwidth, and latency as function of parallel IO request count (QD)"
        yield Menu1st.summary, Menu2ndSumm.io_lat_qd, HTMLBlock(html.H3(html.center(caption)))

        for tpl, suites_jobs in ts_map.items():
            # only plot groups with enough distinct QD points
            if len(suites_jobs) >= self.style.min_iops_vs_qd_jobs:
                # FIX: the comprehension previously unpacked into 'suite', shadowing
                # the method parameter; use distinct names instead
                iosums = [make_iosum(self.rstorage, scfg, jcfg, self.style.hist_boxes)
                          for scfg, jcfg in suites_jobs]
                iosums.sort(key=lambda x: x.qd)
                summary, summary_long = str_summary[tpl]

                ds = DataSource(suite_id=suite.storage_id,
                                job_id=summary,
                                node_id=AGG_TAG,
                                sensor="fio",
                                dev=AGG_TAG,
                                metric="io_over_qd",
                                tag=io_chart_format)

                fpath = self.plt(io_chart, ds, title=summary_long, legend="IOPS/BW", iosums=iosums)
                yield Menu1st.summary, Menu2ndSumm.io_lat_qd, HTMLBlock(html.img(fpath))
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200326
327
class ResourceQD(SuiteReporter):
    """Summary page: bar charts of per-IOP CPU cost as a function of queue depth."""
    suite_types = {'fio'}

    def get_divs(self, suite: SuiteConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        # group fio jobs by every parameter except QD
        qd_grouped_jobs = {}  # type: Dict[FioJobParams, List[FioJobConfig]]
        test_nc = len(list(find_nodes_by_roles(self.rstorage.storage, ['testnode'])))
        for job in self.rstorage.iter_job(suite):
            fjob = cast(FioJobConfig, job)
            # only small-block (4KiB) jobs are considered for per-IOP CPU cost
            if fjob.bsize != 4:
                continue

            fjob_no_qd = cast(FioJobParams, fjob.params.copy(qd=None))
            qd_grouped_jobs.setdefault(fjob_no_qd, []).append(fjob)

        yield Menu1st.summary, Menu2ndSumm.resources_usage_qd, HTMLBlock(html.center(html.H3("Resource usage summary")))

        for jc_no_qd, jobs in sorted(qd_grouped_jobs.items()):
            # QD -> storage CPU seconds spent per IOP (usage record from get_resources_usage)
            cpu_usage2qd = {}
            for job in jobs:
                usage, iops_ok = get_resources_usage(suite, job, self.rstorage, hist_boxes=self.style.hist_boxes,
                                                     large_block=self.style.large_blocks)

                if iops_ok:
                    cpu_usage2qd[job.qd] = usage[ResourceNames.storage_cpu_s]

            # skip groups without enough QD points for a meaningful chart
            if len(cpu_usage2qd) < StyleProfile.min_iops_vs_qd_jobs:
                continue

            # unzip sorted (qd, (total, avg, dev)) records into parallel sequences
            labels, vals, errs = zip(*((l, avg, dev) for l, (_, avg, dev) in sorted(cpu_usage2qd.items())))

            # with several test nodes the effective parallelism is QD * node count
            if test_nc == 1:
                labels = list(map(str, labels))
            else:
                labels = ["{} * {}".format(label, test_nc) for label in labels]

            ds = DataSource(suite_id=suite.storage_id,
                            job_id=jc_no_qd.summary,
                            node_id="cluster",
                            sensor=AGG_TAG,
                            dev='cpu',
                            metric="cpu_for_iop",
                            tag=io_chart_format)

            title = "CPU time per IOP, " + jc_no_qd.long_summary
            fpath = self.plt(plot_simple_bars, ds, title, labels, vals, errs,
                             xlabel="CPU core time per IOP",
                             ylabel="QD * Test nodes" if test_nc != 1 else "QD",
                             x_formatter=(lambda x, pos: b2ssize_10(x) + 's'),
                             one_point_zero_line=False)

            yield Menu1st.summary, Menu2ndSumm.resources_usage_qd, HTMLBlock(html.img(fpath))
379
380
def get_resources_usage2(suite: SuiteConfig, job: JobConfig, rstorage: IWallyStorage,
                         roles, sensor, metric, test_metric, agg_window: int = 5) -> ndarray2d:
    """Collect (average iops, average sensor value) pairs over agg_window-sample windows.

    Parameters:
        roles: node roles passed to the cluster CPU load query
        sensor, metric: which sensor stream to correlate with iops
        test_metric: must be 'iops' -- the only supported test metric
        agg_window: number of consecutive samples averaged into one point
    Returns a list of (iops, metric_value) tuples.
    """
    assert test_metric == 'iops'
    fjob = cast(FioJobConfig, job)

    # convert aggregated bandwidth into operations per sample
    bw = get_aggregated(rstorage, suite.storage_id, job.storage_id, "bw", job.reliable_info_range_s)
    io_transfered = bw.data * unit_conversion_coef_f(bw.units, "Bps")
    ops_done = io_transfered / (fjob.bsize * unit_conversion_coef_f("KiBps", "Bps"))
    nodes = [node for node in rstorage.load_nodes() if node.roles.intersection(STORAGE_ROLES)]

    if sensor == 'system-cpu':
        assert metric == 'used'
        # all storage nodes are expected to have identical core counts
        core_count = None
        for node in nodes:
            if core_count is None:
                core_count = sum(cores for _, cores in node.hw_info.cpus)
            else:
                assert core_count == sum(cores for _, cores in node.hw_info.cpus)
        cpu_ts = get_cluster_cpu_load(rstorage, roles, job.reliable_info_range_s)
        # busy core count = (1 - idle - iowait fraction) * total cores
        metric_arr = (1.0 - (cpu_ts['idle'].data + cpu_ts['iowait'].data) / cpu_ts['total'].data) * core_count
    else:
        # BUGFIX: the original kept a raw numpy array in one branch and a
        # time-series object in the other, then unconditionally sliced
        # 'metric_data.data'; for a numpy array '.data' is its buffer
        # memoryview, not the values.  Normalize both branches to a plain
        # value array here instead (sum_sensors presumably returns an object
        # with a '.data' array -- confirm against cephlib).
        metric_arr = sum_sensors(rstorage, job.reliable_info_range_s,
                                 node_id=[node.node_id for node in nodes],
                                 sensor=sensor, metric=metric).data

    res = []
    for pos in range(0, len(ops_done) - agg_window, agg_window):
        pe = pos + agg_window
        res.append((numpy.average(ops_done[pos: pe]), numpy.average(metric_arr[pos: pe])))

    return res
410
411
class ResourceConsumptionSummary(SuiteReporter):
    """Engineering page: regression plots of storage resource usage vs delivered IOPS."""
    suite_types = {'fio'}

    def _gather(self, suite: SuiteConfig, job_tp: str, sensor: str, metric: str, vs: str) -> list:
        """Collect (iops, metric) sample pairs from every job whose summary matches job_tp."""
        pairs = []  # type: list
        for job in self.rstorage.iter_job(suite):
            if job_tp in job.summary:
                pairs.extend(get_resources_usage2(suite, job, self.rstorage, STORAGE_ROLES,
                                                  sensor=sensor, metric=metric, test_metric=vs))
        return pairs

    def get_divs(self, suite: SuiteConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        vs = 'iops'
        for job_tp in ('rwd4', 'rrd4'):
            for sensor_metric in ('net-io.send_packets', 'system-cpu.used'):
                sensor, metric = sensor_metric.split(".")
                samples = self._gather(suite, job_tp, sensor, metric, vs)
                if not samples:
                    continue

                xs, ys = zip(*samples)

                # fit metric = slope * iops + intercept and draw the fitted line
                slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xs, ys)
                line_x = numpy.array([0.0, max(xs) * 1.1])

                ds = DataSource(suite_id=suite.storage_id,
                                job_id=job_tp,
                                node_id="storage",
                                sensor='usage-regression',
                                dev=AGG_TAG,
                                metric=sensor_metric + '.VS.' + vs,
                                tag=default_format)

                fname = self.plt(plot_dots_with_regression, ds,
                                 "{}::{}.{}".format(job_tp, sensor_metric, vs),
                                 x=xs, y=ys,
                                 xlabel=vs,
                                 ylabel=sensor_metric,
                                 x_approx=line_x, y_approx=intercept + slope * line_x)

                yield Menu1st.engineering, Menu2ndEng.resource_regression, HTMLBlock(html.img(fname))
449
450
class EngineeringSummary(SuiteReporter):
    """Engineering page: the console report table rendered as HTML."""
    suite_types = {'fio'}

    def get_divs(self, suite: SuiteConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        # keep only data rows, dropping texttable horizontal-rule markers
        rows = []
        for line in get_console_report_table(suite, self.rstorage):
            if line is not Texttable.HLINE:
                rows.append(line)

        markers = {'l': 'left', 'r': 'right'}
        alignment = [markers[al] for al in console_report_align]
        table_html = html.table("Test results", console_report_headers, rows, align=alignment)
        yield Menu1st.engineering, Menu2ndEng.summary, HTMLBlock(html.center(table_html))
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200459
460
class StatInfo(JobReporter):
    """Per-job statistics table: bandwidth, IOPS and latency percentiles,
    distribution moments and an ADF stationarity check."""
    suite_types = {'fio'}

    def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:

        fjob = cast(FioJobConfig, job)
        io_sum = make_iosum(self.rstorage, suite, fjob, self.style.hist_boxes)

        caption = "Test summary - " + job.params.long_summary
        test_nc = len(list(find_nodes_by_roles(self.rstorage.storage, ['testnode'])))
        if test_nc > 1:
            caption += " * {} nodes".format(test_nc)

        res = html.H3(html.center(caption))
        # FIX: header typo "Mediana" -> "Median"
        stat_data_headers = ["Name",
                             "Total done",
                             "Average ~ Dev",
                             "Conf interval",
                             "Median",
                             "Mode",
                             "Kurt / Skew",
                             "95%",
                             "99%",
                             "ADF test"]

        align = ['left'] + ['right'] * (len(stat_data_headers) - 1)

        bw_units = "B"
        bw_target_units = bw_units + 'ps'
        bw_coef = unit_conversion_coef_f(io_sum.bw.units, bw_target_units)

        # Augmented Dickey-Fuller: compare the test statistic against critical
        # values to report at which significance level stationarity holds
        adf_v, *_1, stats, _2 = adfuller(io_sum.bw.data)

        for v in ("1%", "5%", "10%"):
            if adf_v <= stats[v]:
                ad_test = v
                break
        else:
            ad_test = "Failed"

        bw_data = ["Bandwidth",
                   b2ssize(io_sum.bw.data.sum() * bw_coef) + bw_units,
                   "{}{} ~ {}{}".format(b2ssize(io_sum.bw.average * bw_coef), bw_target_units,
                                        b2ssize(io_sum.bw.deviation * bw_coef), bw_target_units),
                   b2ssize(io_sum.bw.confidence * bw_coef) + bw_target_units,
                   b2ssize(io_sum.bw.perc_50 * bw_coef) + bw_target_units,
                   "-",
                   "{:.2f} / {:.2f}".format(io_sum.bw.kurt, io_sum.bw.skew),
                   b2ssize(io_sum.bw.perc_5 * bw_coef) + bw_target_units,
                   b2ssize(io_sum.bw.perc_1 * bw_coef) + bw_target_units,
                   ad_test]

        stat_data = [bw_data]

        # IOPS and latency rows only make sense for small-block jobs
        if fjob.bsize < StyleProfile.large_blocks:
            iops_coef = unit_conversion_coef_f(io_sum.bw.units, 'KiBps') / fjob.bsize
            iops_data = ["IOPS",
                         b2ssize_10(io_sum.bw.data.sum() * iops_coef),
                         "{}IOPS ~ {}IOPS".format(b2ssize_10(io_sum.bw.average * iops_coef),
                                                  b2ssize_10(io_sum.bw.deviation * iops_coef)),
                         b2ssize_10(io_sum.bw.confidence * iops_coef) + "IOPS",
                         b2ssize_10(io_sum.bw.perc_50 * iops_coef) + "IOPS",
                         "-",
                         "{:.2f} / {:.2f}".format(io_sum.bw.kurt, io_sum.bw.skew),
                         b2ssize_10(io_sum.bw.perc_5 * iops_coef) + "IOPS",
                         b2ssize_10(io_sum.bw.perc_1 * iops_coef) + "IOPS",
                         ad_test]

            lat_target_unit = 's'
            lat_coef = unit_conversion_coef_f(io_sum.lat.units, lat_target_unit)
            # latency: only percentiles are available, no totals/averages
            lat_data = ["Latency",
                        "-",
                        "-",
                        "-",
                        b2ssize_10(io_sum.lat.perc_50 * lat_coef) + lat_target_unit,
                        "-",
                        "-",
                        b2ssize_10(io_sum.lat.perc_95 * lat_coef) + lat_target_unit,
                        b2ssize_10(io_sum.lat.perc_99 * lat_coef) + lat_target_unit,
                        '-']

            # sensor usage
            stat_data.extend([iops_data, lat_data])

        res += html.center(html.table("Test results", stat_data_headers, stat_data, align=align))
        yield Menu1st.per_job, job.summary, HTMLBlock(res)
koder aka kdanilova732a602017-02-01 20:29:56 +0200549
koder aka kdanilova732a602017-02-01 20:29:56 +0200550
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300551class Resources(JobReporter):
552 """Statistic info for job results"""
553 suite_types = {'fio'}
554
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300555 def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300556
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300557 records, iops_ok = get_resources_usage(suite, job, self.rstorage,
558 large_block=self.style.large_blocks,
559 hist_boxes=self.style.hist_boxes)
koder aka kdanilova732a602017-02-01 20:29:56 +0200560
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300561 table_structure = [
562 "Service provided",
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300563 (ResourceNames.io_made, ResourceNames.data_tr),
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300564 "Test nodes total load",
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300565 (ResourceNames.test_send_pkt, ResourceNames.test_send),
566 (ResourceNames.test_recv_pkt, ResourceNames.test_recv),
567 (ResourceNames.test_net_pkt, ResourceNames.test_net),
568 (ResourceNames.test_write_iop, ResourceNames.test_write),
569 (ResourceNames.test_read_iop, ResourceNames.test_read),
570 (ResourceNames.test_iop, ResourceNames.test_rw),
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300571 "Storage nodes resource consumed",
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300572 (ResourceNames.storage_send_pkt, ResourceNames.storage_send),
573 (ResourceNames.storage_recv_pkt, ResourceNames.storage_recv),
574 (ResourceNames.storage_net_pkt, ResourceNames.storage_net),
575 (ResourceNames.storage_write_iop, ResourceNames.storage_write),
576 (ResourceNames.storage_read_iop, ResourceNames.storage_read),
577 (ResourceNames.storage_iop, ResourceNames.storage_rw),
578 (ResourceNames.storage_cpu_s, ResourceNames.storage_cpu_s_b),
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300579 ] # type: List[Union[str, Tuple[Optional[str], ...]]]
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300580
581 if not iops_ok:
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300582 table_structure2 = [] # type: List[Union[Tuple[str, ...], str]]
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300583 for line in table_structure:
584 if isinstance(line, str):
585 table_structure2.append(line)
586 else:
587 assert len(line) == 2
588 table_structure2.append((line[1],))
589 table_structure = table_structure2
koder aka kdanilova732a602017-02-01 20:29:56 +0200590
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300591 yield Menu1st.per_job, job.summary, HTMLBlock(html.H3(html.center("Resources usage")))
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300592
593 doc = xmlbuilder3.XMLBuilder("table",
594 **{"class": "table table-bordered table-striped table-condensed table-hover",
595 "style": "width: auto;"})
596
597 with doc.thead:
598 with doc.tr:
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300599 [doc.th(header) for header in ["Resource", "Usage count", "To service"] * (2 if iops_ok else 1)]
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300600
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300601 cols = 6 if iops_ok else 3
602 col_per_tp = 3
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300603
604 short_name = {
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300605 name: (name if name in {ResourceNames.io_made, ResourceNames.data_tr}
606 else " ".join(name.split()[2:]).capitalize())
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300607 for name in records.keys()
608 }
609
kdanylov aka koderb0833332017-05-13 20:39:17 +0300610 short_name[ResourceNames.storage_cpu_s] = "CPU core (s/IOP)"
611 short_name[ResourceNames.storage_cpu_s_b] = "CPU core (s/B)"
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300612
613 with doc.tbody:
614 with doc.tr:
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300615 if iops_ok:
616 doc.td(colspan=str(col_per_tp)).center.b("Operations")
617 doc.td(colspan=str(col_per_tp)).center.b("Bytes")
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300618
619 for line in table_structure:
620 with doc.tr:
621 if isinstance(line, str):
622 with doc.td(colspan=str(cols)):
623 doc.center.b(line)
624 else:
625 for name in line:
626 if name is None:
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300627 doc.td("-", colspan=str(col_per_tp))
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300628 continue
629
630 amount_s, avg, dev = records[name]
631
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300632 if name in (ResourceNames.storage_cpu_s, ResourceNames.storage_cpu_s_b) and avg is not None:
633 if dev is None:
634 rel_val_s = b2ssize_10(avg) + 's'
635 else:
636 dev_s = str(int(dev * 100 / avg)) + "%" if avg > 1E-9 else b2ssize_10(dev) + 's'
637 rel_val_s = "{}s ~ {}".format(b2ssize_10(avg), dev_s)
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300638 else:
639 if avg is None:
640 rel_val_s = '-'
641 else:
642 avg_s = int(avg) if avg > 10 else '{:.1f}'.format(avg)
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300643 if dev is None:
644 rel_val_s = avg_s
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300645 else:
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300646 if avg > 1E-5:
647 dev_s = str(int(dev * 100 / avg)) + "%"
648 else:
649 dev_s = int(dev) if dev > 10 else '{:.1f}'.format(dev)
650 rel_val_s = "{} ~ {}".format(avg_s, dev_s)
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300651
652 doc.td(short_name[name], align="left")
653 doc.td(amount_s, align="right")
654
655 if avg is None or avg < 0.9:
656 doc.td(rel_val_s, align="right")
657 elif avg < 2.0:
658 doc.td(align="right").font(rel_val_s, color='green')
659 elif avg < 5.0:
660 doc.td(align="right").font(rel_val_s, color='orange')
661 else:
662 doc.td(align="right").font(rel_val_s, color='red')
663
664 res = xmlbuilder3.tostr(doc).split("\n", 1)[1]
665 yield Menu1st.per_job, job.summary, HTMLBlock(html.center(res))
666
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300667 iop_names = [ResourceNames.test_write_iop, ResourceNames.test_read_iop, ResourceNames.test_iop,
668 ResourceNames.storage_write_iop, ResourceNames.storage_read_iop, ResourceNames.storage_iop]
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300669
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300670 bytes_names = [ResourceNames.test_write, ResourceNames.test_read, ResourceNames.test_rw,
671 ResourceNames.test_send, ResourceNames.test_recv, ResourceNames.test_net,
672 ResourceNames.storage_write, ResourceNames.storage_read, ResourceNames.storage_rw,
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300673 ResourceNames.storage_send, ResourceNames.storage_recv,
674 ResourceNames.storage_net] # type: List[str]
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300675
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300676 net_pkt_names = [ResourceNames.test_send_pkt, ResourceNames.test_recv_pkt, ResourceNames.test_net_pkt,
677 ResourceNames.storage_send_pkt, ResourceNames.storage_recv_pkt, ResourceNames.storage_net_pkt]
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300678
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300679 pairs = [("bytes", bytes_names)]
680 if iops_ok:
681 pairs.insert(0, ('iop', iop_names))
682 pairs.append(('Net packets per IOP', net_pkt_names))
683
684 yield Menu1st.per_job, job.summary, \
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300685 HTMLBlock(html.H3(html.center("Resource consumption per service provided")))
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300686
687 for tp, names in pairs:
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300688 vals = [] # type: List[float]
689 devs = [] # type: List[float]
690 avail_names = [] # type: List[str]
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300691 for name in names:
692 if name in records:
693 avail_names.append(name)
694 _, avg, dev = records[name]
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300695
696 if dev is None:
697 dev = 0
698
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300699 vals.append(avg)
700 devs.append(dev)
701
702 # synchronously sort values and names, values is a key
kdanylov aka koder026e5f22017-05-15 01:04:39 +0300703 vals, names, devs = map(list, zip(*sorted(zip(vals, names, devs)))) # type: ignore
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300704
705 ds = DataSource(suite_id=suite.storage_id,
706 job_id=job.storage_id,
707 node_id=AGG_TAG,
708 sensor='resources',
709 dev=AGG_TAG,
710 metric=tp.replace(' ', "_") + '2service_bar',
711 tag=default_format)
712
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300713 fname = self.plt(plot_simple_bars, ds, tp.capitalize(),
714 [name.replace(" nodes", "") for name in names],
715 vals, devs)
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300716
717 yield Menu1st.per_job, job.summary, HTMLBlock(html.img(fname))
718
719
class BottleNeck(JobReporter):
    """Count IO-queue-depth bottleneck samples on storage-node disks.

    For every storage node, counts how many block-io queue-depth samples
    exceed ``bn_val`` during the job's reliable time range, and how many
    samples were seen in total.  Currently emits no HTML content, only a
    debug log line per node.
    """
    suite_types = {'fio'}

    def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:

        nodes = list(find_nodes_by_roles(self.rstorage.storage, STORAGE_ROLES))

        sensor = 'block-io'
        metric = 'io_queue'
        bn_val = 16  # samples with queue depth above this count as bottlenecked

        for node_id in nodes:
            bn = 0   # bottlenecked sample count
            tot = 0  # total sample count
            for ds in self.rstorage.iter_sensors(node_id=node_id, sensor=sensor, metric=metric):
                # TODO(review): device names are hard-coded — should come from
                # config or device roles instead
                if ds.dev in ('sdb', 'sdc', 'sdd', 'sde'):
                    ts = self.rstorage.get_sensor(ds, job.reliable_info_range_s)
                    bn += (ts.data > bn_val).sum()
                    tot += len(ts.data)
            # was a bare print() to stdout; use the module logger instead
            logger.debug("Bottleneck stats for %s: %s of %s samples above QD=%s",
                         node_id, bn, tot, bn_val)

        yield Menu1st.per_job, job.summary, HTMLBlock("")
koder aka kdanilova732a602017-02-01 20:29:56 +0200743
744
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +0300745# CPU load
class CPULoadPlot(JobReporter):
    """Plot CPU time split (user/sys/idle/iowait) for storage and test nodes."""

    def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        # One plot per node group: storage nodes first, then test nodes.
        for node_kind, node_roles in [('storage', STORAGE_ROLES), ('test', ['testnode'])]:
            cpu_usage = get_cluster_cpu_load(self.rstorage, node_roles, job.reliable_info_range_s)

            wanted_states = {'user', 'sys', 'idle', 'iowait'}
            tss = []
            for state_name, state_ts in cpu_usage.items():
                if state_name in wanted_states:
                    # express each state as a percentage of total CPU time
                    tss.append((state_name, state_ts.data * 100 / cpu_usage['total'].data))

            ds = cpu_usage['idle'].source(job_id=job.storage_id, suite_id=suite.storage_id,
                                          node_id=AGG_TAG, metric='allcpu',
                                          tag=node_kind + '.plt.' + default_format)

            fname = self.plt(plot_simple_over_time, ds, tss=tss, average=True, ylabel="CPU time %",
                             title="{} nodes CPU usage".format(node_kind.capitalize()),
                             xlabel="Time from test begin")

            yield Menu1st.per_job, job.summary, HTMLBlock(html.img(fname))
765
766
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300767class DevRoles:
768 client_disk = 'client_disk'
769 client_net = 'client_net'
770 client_cpu = 'client_cpu'
771
772 storage_disk = 'storage_disk'
773 storage_client_net = 'storage_client_net'
774 storage_replication_net = 'storage_replication_net'
775 storage_cpu = 'storage_disk'
776 ceph_storage = 'ceph_storage'
777 ceph_journal = 'ceph_journal'
778
779 compute_disk = 'compute_disk'
780 compute_net = 'compute_net'
781 compute_cpu = 'compute_cpu'
782
783
def roles_for_sensors(storage: IWallyStorage) -> Dict[str, List[DataSource]]:
    """Map each device role name to the data sources playing that role.

    Walks all known nodes: for ceph-osd nodes, journal and storage devices
    are registered under their ceph-specific roles and also under the
    generic storage-disk role (as are any disks from hw_info); test nodes
    contribute their rbd0 device as a client disk.
    """
    by_role = defaultdict(list)  # type: Dict[str, List[DataSource]]

    for node in storage.load_nodes():
        node_ds = DataSource(node_id=node.node_id)

        if 'ceph-osd' in node.roles:
            for dev_name in node.params.get('ceph_journal_devs', []):
                by_role[DevRoles.ceph_journal].append(node_ds(dev=dev_name))
                by_role[DevRoles.storage_disk].append(node_ds(dev=dev_name))

            for dev_name in node.params.get('ceph_storage_devs', []):
                by_role[DevRoles.ceph_storage].append(node_ds(dev=dev_name))
                by_role[DevRoles.storage_disk].append(node_ds(dev=dev_name))

            if node.hw_info:
                for dev_name in node.hw_info.disks_info:
                    by_role[DevRoles.storage_disk].append(node_ds(dev=dev_name))

        if 'testnode' in node.roles:
            by_role[DevRoles.client_disk].append(node_ds(dev='rbd0'))

    return by_role
806
807
def get_sources_for_roles(roles: Iterable[str]) -> List[DataSource]:
    """Resolve device roles to their data sources.

    Currently a stub: always returns an empty list.
    TODO: implement the actual role -> DataSource resolution.
    """
    return list()
810
811
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +0300812# IO time and QD
class QDIOTimeHeatmap(JobReporter):
    """Engineering heatmaps per device role: IO queue depth, average write
    block size, and IO time, over the job's reliable time range."""

    def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
        # reliable_info_range is in milliseconds; sensor data is indexed in seconds
        trange = (job.reliable_info_range[0] // 1000, job.reliable_info_range[1] // 1000)
        test_nc = len(list(find_nodes_by_roles(self.rstorage.storage, ['testnode'])))

        for dev_role in (DevRoles.ceph_storage, DevRoles.ceph_journal, DevRoles.client_disk):

            caption = "{} IO heatmaps - {}".format(dev_role.capitalize(), cast(FioJobParams, job).params.long_summary)
            if test_nc != 1:
                caption += " * {} nodes".format(test_nc)

            yield Menu1st.engineering_per_job, job.summary, HTMLBlock(html.H3(html.center(caption)))

            # QD heatmap
            ioq2d = find_sensors_to_2d(self.rstorage, trange, dev_role=dev_role, sensor='block-io', metric='io_queue')

            ds = DataSource(suite.storage_id, job.storage_id, AGG_TAG, 'block-io', dev_role,
                            tag="hmap." + default_format)

            fname = self.plt(plot_hmap_from_2d, ds(metric='io_queue'), data2d=ioq2d, xlabel='Time', ylabel="IO QD",
                             title=dev_role.capitalize() + " devs QD", bins=StyleProfile.qd_bins)
            yield Menu1st.engineering_per_job, job.summary, HTMLBlock(html.img(fname))

            # Block size heatmap: average written block = sectors_written / writes_completed
            wc2d = find_sensors_to_2d(self.rstorage, trange, dev_role=dev_role, sensor='block-io',
                                      metric='writes_completed')
            # avoid division by zero on idle intervals
            wc2d[wc2d < 1E-3] = 1
            sw2d = find_sensors_to_2d(self.rstorage, trange, dev_role=dev_role, sensor='block-io',
                                      metric='sectors_written')
            # NOTE(review): the /1024 scale assumes a particular sector size —
            # confirm it actually yields KiB as the plot label claims
            data2d = sw2d / wc2d / 1024
            fname = self.plt(plot_hmap_from_2d, ds(metric='wr_block_size'),
                             data2d=data2d, title=dev_role.capitalize() + " write block size",
                             ylabel="IO bsize, KiB", xlabel='Time', bins=StyleProfile.block_size_bins)
            yield Menu1st.engineering_per_job, job.summary, HTMLBlock(html.img(fname))

            # iotime heatmap
            wtime2d = find_sensors_to_2d(self.rstorage, trange, dev_role=dev_role, sensor='block-io',
                                         metric='io_time')
            fname = self.plt(plot_hmap_from_2d, ds(metric='io_time'), data2d=wtime2d,
                             xlabel='Time', ylabel="IO time (ms) per second",
                             title=dev_role.capitalize() + " iotime", bins=StyleProfile.iotime_bins)
            yield Menu1st.engineering_per_job, job.summary, HTMLBlock(html.img(fname))
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +0300877
878
koder aka kdanilov108ac362017-01-19 20:17:16 +0200879# IOPS/latency over test time for each job
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300880class LoadToolResults(JobReporter):
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200881 """IOPS/latency during test"""
koder aka kdanilova732a602017-02-01 20:29:56 +0200882 suite_types = {'fio'}
koder aka kdanilov108ac362017-01-19 20:17:16 +0200883
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300884 def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
koder aka kdanilova732a602017-02-01 20:29:56 +0200885 fjob = cast(FioJobConfig, job)
koder aka kdanilov108ac362017-01-19 20:17:16 +0200886
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300887 # caption = "Load tool results, " + job.params.long_summary
888 caption = "Load tool results"
889 yield Menu1st.per_job, job.summary, HTMLBlock(html.H3(html.center(caption)))
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300890
kdanylov aka koderb0833332017-05-13 20:39:17 +0300891 agg_io = get_aggregated(self.rstorage, suite.storage_id, fjob.storage_id, "bw", job.reliable_info_range_s)
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300892
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300893 if fjob.bsize >= DefStyleProfile.large_blocks:
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300894 title = "Fio measured bandwidth over time"
koder aka kdanilova732a602017-02-01 20:29:56 +0200895 units = "MiBps"
kdanylov aka koderb0833332017-05-13 20:39:17 +0300896 agg_io.data //= int(unit_conversion_coef_f(units, agg_io.units))
koder aka kdanilova732a602017-02-01 20:29:56 +0200897 else:
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +0300898 title = "Fio measured IOPS over time"
kdanylov aka koderb0833332017-05-13 20:39:17 +0300899 agg_io.data //= (int(unit_conversion_coef_f("KiBps", agg_io.units)) * fjob.bsize)
koder aka kdanilova732a602017-02-01 20:29:56 +0200900 units = "IOPS"
koder aka kdanilov108ac362017-01-19 20:17:16 +0200901
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300902 fpath = self.plt(plot_v_over_time, agg_io.source(tag='ts.' + default_format), title, units, agg_io)
koder aka kdanilova732a602017-02-01 20:29:56 +0200903 yield Menu1st.per_job, fjob.summary, HTMLBlock(html.img(fpath))
koder aka kdanilov108ac362017-01-19 20:17:16 +0200904
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300905 title = "BW distribution" if fjob.bsize >= DefStyleProfile.large_blocks else "IOPS distribution"
906 io_stat_prop = calc_norm_stat_props(agg_io, bins_count=StyleProfile.hist_boxes)
907 fpath = self.plt(plot_hist, agg_io.source(tag='hist.' + default_format), title, units, io_stat_prop)
908 yield Menu1st.per_job, fjob.summary, HTMLBlock(html.img(fpath))
909
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300910 if fjob.bsize < DefStyleProfile.large_blocks:
kdanylov aka koderb0833332017-05-13 20:39:17 +0300911 agg_lat = get_aggregated(self.rstorage, suite.storage_id, fjob.storage_id, "lat",
912 job.reliable_info_range_s)
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300913 TARGET_UNITS = 'ms'
kdanylov aka koderb0833332017-05-13 20:39:17 +0300914 coef = unit_conversion_coef_f(agg_lat.units, TARGET_UNITS)
915 agg_lat.histo_bins = agg_lat.histo_bins.copy() * coef
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300916 agg_lat.units = TARGET_UNITS
koder aka kdanilov108ac362017-01-19 20:17:16 +0200917
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300918 fpath = self.plt(plot_lat_over_time, agg_lat.source(tag='ts.' + default_format), "Latency", agg_lat,
919 ylabel="Latency, " + agg_lat.units)
920 yield Menu1st.per_job, fjob.summary, HTMLBlock(html.img(fpath))
koder aka kdanilov108ac362017-01-19 20:17:16 +0200921
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300922 fpath = self.plt(plot_histo_heatmap, agg_lat.source(tag='hmap.' + default_format),
923 "Latency heatmap", agg_lat, ylabel="Latency, " + agg_lat.units, xlabel='Test time')
koder aka kdanilov108ac362017-01-19 20:17:16 +0200924
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300925 yield Menu1st.per_job, fjob.summary, HTMLBlock(html.img(fpath))
koder aka kdanilov108ac362017-01-19 20:17:16 +0200926
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200927
928# Cluster load over test time
koder aka kdanilova732a602017-02-01 20:29:56 +0200929class ClusterLoad(JobReporter):
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200930 """IOPS/latency during test"""
931
koder aka kdanilova732a602017-02-01 20:29:56 +0200932 # TODO: units should came from sensor
koder aka kdanilov108ac362017-01-19 20:17:16 +0200933 storage_sensors = [
kdanylov aka koder45183182017-04-30 23:55:40 +0300934 ('block-io', 'reads_completed', "Read", 'iop'),
935 ('block-io', 'writes_completed', "Write", 'iop'),
kdanylov aka koder736e5c12017-05-07 17:27:14 +0300936 ('block-io', 'sectors_read', "Read", 'MiB'),
937 ('block-io', 'sectors_written', "Write", 'MiB'),
koder aka kdanilov108ac362017-01-19 20:17:16 +0200938 ]
939
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300940 def get_divs(self, suite: SuiteConfig, job: JobConfig) -> Iterator[Tuple[str, str, HTMLBlock]]:
941
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300942 yield Menu1st.per_job, job.summary, HTMLBlock(html.H3(html.center("Cluster load")))
koder aka kdanilov108ac362017-01-19 20:17:16 +0200943
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300944 sensors = []
945 max_iop = 0
946 max_bytes = 0
kdanylov aka koderb0833332017-05-13 20:39:17 +0300947 stor_nodes = find_nodes_by_roles(self.rstorage.storage, STORAGE_ROLES)
kdanylov aka koder45183182017-04-30 23:55:40 +0300948 for sensor, metric, op, units in self.storage_sensors:
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300949 ts = sum_sensors(self.rstorage, job.reliable_info_range_s, node_id=stor_nodes, sensor=sensor, metric=metric)
kdanylov aka koderb0833332017-05-13 20:39:17 +0300950 if ts is not None:
951 ds = DataSource(suite_id=suite.storage_id,
952 job_id=job.storage_id,
953 node_id="storage",
954 sensor=sensor,
955 dev=AGG_TAG,
956 metric=metric,
957 tag="ts." + default_format)
koder aka kdanilov108ac362017-01-19 20:17:16 +0200958
kdanylov aka koderb0833332017-05-13 20:39:17 +0300959 data = ts.data if units != 'MiB' else ts.data * unit_conversion_coef_f(ts.units, 'MiB')
960 ts = TimeSeries(times=numpy.arange(*job.reliable_info_range_s),
961 data=data,
962 units=units if ts.units is None else ts.units,
963 time_units=ts.time_units,
964 source=ds,
965 histo_bins=ts.histo_bins)
kdanylov aka koder0e0cfcb2017-03-27 22:19:09 +0300966
kdanylov aka koderb0833332017-05-13 20:39:17 +0300967 sensors.append(("{} {}".format(op, units), ds, ts, units))
koder aka kdanilov108ac362017-01-19 20:17:16 +0200968
kdanylov aka koderb0833332017-05-13 20:39:17 +0300969 if units == 'iop':
970 max_iop = max(max_iop, data.sum())
971 else:
972 assert units == 'MiB'
973 max_bytes = max(max_bytes, data.sum())
koder aka kdanilov108ac362017-01-19 20:17:16 +0200974
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300975 for title, ds, ts, units in sensors:
976 if ts.data.sum() >= (max_iop if units == 'iop' else max_bytes) * DefStyleProfile.min_load_diff:
977 fpath = self.plt(plot_v_over_time, ds, title, units, ts=ts)
978 yield Menu1st.per_job, job.summary, HTMLBlock(html.img(fpath))
979 else:
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300980 logger.info("Hide '%s' plot for %s, as it's load is less then %s%% from maximum",
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +0300981 title, job.summary, int(DefStyleProfile.min_load_diff * 100))
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200982
983
koder aka kdanilov108ac362017-01-19 20:17:16 +0200984# ------------------------------------------ REPORT STAGES -----------------------------------------------------------
985
986
kdanylov aka koder84de1e42017-05-22 14:00:07 +0300987def add_devroles(ctx: TestRun):
988 # TODO: need to detect all devices for node on this stage using hw info
989 detected_selectors = collections.defaultdict(
990 lambda: collections.defaultdict(list)) # type: Dict[str, Dict[str, List[str]]]
991
992 for node in ctx.nodes:
993 if NodeRole.osd in node.info.roles:
994 all_devs = set()
995
996 jdevs = node.info.params.get('ceph_journal_devs')
997 if jdevs:
998 all_devs.update(jdevs)
999 detected_selectors[node.info.hostname]["|".join(jdevs)].append(DevRoles.osd_journal)
1000
1001 sdevs = node.info.params.get('ceph_storage_devs')
1002 if sdevs:
1003 all_devs.update(sdevs)
1004 detected_selectors[node.info.hostname]["|".join(sdevs)].append(DevRoles.osd_storage)
1005
1006 if all_devs:
1007 detected_selectors[node.info.hostname]["|".join(all_devs)].append(DevRoles.storage_block)
1008
1009 for hostname, dev_rules in detected_selectors.items():
1010 dev_locs = [] # type: List[Dict[str, List[str]]]
1011 ctx.devs_locator.append({hostname: dev_locs})
1012 for dev_names, roles in dev_rules.items():
1013 dev_locs.append({dev_names: roles})
1014
1015
koder aka kdanilov108ac362017-01-19 20:17:16 +02001016class HtmlReportStage(Stage):
1017 priority = StepOrder.REPORT
1018
1019 def run(self, ctx: TestRun) -> None:
kdanylov aka koder84de1e42017-05-22 14:00:07 +03001020 nodes = ctx.rstorage.load_nodes()
1021 update_storage_selector(ctx.rstorage, ctx.devs_locator, nodes)
1022
1023 job_reporters_cls = [StatInfo, LoadToolResults, Resources, ClusterLoad, CPULoadPlot, QDIOTimeHeatmap]
1024 # job_reporters_cls = [QDIOTimeHeatmap]
kdanylov aka koder026e5f22017-05-15 01:04:39 +03001025 job_reporters = [rcls(ctx.rstorage, DefStyleProfile, DefColorProfile)
1026 for rcls in job_reporters_cls] # type: ignore
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001027
kdanylov aka koder84de1e42017-05-22 14:00:07 +03001028 suite_reporters_cls = [IOQD,
1029 ResourceQD,
1030 PerformanceSummary,
1031 EngineeringSummary,
1032 ResourceConsumptionSummary] # type: List[Type[SuiteReporter]]
1033 # suite_reporters_cls = [] # type: List[Type[SuiteReporter]]
kdanylov aka koder026e5f22017-05-15 01:04:39 +03001034 suite_reporters = [rcls(ctx.rstorage, DefStyleProfile, DefColorProfile)
1035 for rcls in suite_reporters_cls] # type: ignore
koder aka kdanilov108ac362017-01-19 20:17:16 +02001036
1037 root_dir = os.path.dirname(os.path.dirname(wally.__file__))
1038 doc_templ_path = os.path.join(root_dir, "report_templates/index.html")
1039 report_template = open(doc_templ_path, "rt").read()
1040 css_file_src = os.path.join(root_dir, "report_templates/main.css")
1041 css_file = open(css_file_src, "rt").read()
1042
1043 menu_block = []
1044 content_block = []
1045 link_idx = 0
1046
koder aka kdanilova732a602017-02-01 20:29:56 +02001047 # matplotlib.rcParams.update(ctx.config.reporting.matplotlib_params.raw())
1048 # ColorProfile.__dict__.update(ctx.config.reporting.colors.raw())
1049 # StyleProfile.__dict__.update(ctx.config.reporting.style.raw())
koder aka kdanilov108ac362017-01-19 20:17:16 +02001050
koder aka kdanilova732a602017-02-01 20:29:56 +02001051 items = defaultdict(lambda: defaultdict(list)) # type: Dict[str, Dict[str, List[HTMLBlock]]]
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +03001052 DEBUG = False
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001053 job_summ_sort_order = []
1054
koder aka kdanilova732a602017-02-01 20:29:56 +02001055 # TODO: filter reporters
kdanylov aka koderb0833332017-05-13 20:39:17 +03001056 for suite in ctx.rstorage.iter_suite(FioTest.name):
1057 all_jobs = list(ctx.rstorage.iter_job(suite))
koder aka kdanilova732a602017-02-01 20:29:56 +02001058 all_jobs.sort(key=lambda job: job.params)
koder aka kdanilova732a602017-02-01 20:29:56 +02001059
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001060 new_jobs_in_order = [job.summary for job in all_jobs]
1061 same = set(new_jobs_in_order).intersection(set(job_summ_sort_order))
1062 assert not same, "Job with same names in different suits found: " + ",".join(same)
1063 job_summ_sort_order.extend(new_jobs_in_order)
1064
kdanylov aka koderb0833332017-05-13 20:39:17 +03001065 for job in all_jobs:
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001066 try:
kdanylov aka koder026e5f22017-05-15 01:04:39 +03001067 for reporter in job_reporters: # type: JobReporter
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001068 logger.debug("Start reporter %s on job %s suite %s",
1069 reporter.__class__.__name__, job.summary, suite.test_type)
1070 for block, item, html in reporter.get_divs(suite, job):
1071 items[block][item].append(html)
1072 if DEBUG:
1073 break
1074 except Exception:
1075 logger.exception("Failed to generate report for %s", job.summary)
1076
kdanylov aka koder026e5f22017-05-15 01:04:39 +03001077 for sreporter in suite_reporters: # type: SuiteReporter
kdanylov aka koder736e5c12017-05-07 17:27:14 +03001078 try:
kdanylov aka koder026e5f22017-05-15 01:04:39 +03001079 logger.debug("Start reporter %s on suite %s", sreporter.__class__.__name__, suite.test_type)
1080 for block, item, html in sreporter.get_divs(suite):
kdanylov aka koder736e5c12017-05-07 17:27:14 +03001081 items[block][item].append(html)
kdanylov aka koder026e5f22017-05-15 01:04:39 +03001082 except Exception:
kdanylov aka koder84de1e42017-05-22 14:00:07 +03001083 logger.exception("Failed to generate report for suite %s", suite.storage_id)
koder aka kdanilov108ac362017-01-19 20:17:16 +02001084
koder aka kdanilova732a602017-02-01 20:29:56 +02001085 if DEBUG:
1086 break
1087
kdanylov aka kodercdfcdaf2017-04-29 10:03:39 +03001088 logger.debug("Generating result html")
1089
koder aka kdanilov108ac362017-01-19 20:17:16 +02001090 for idx_1st, menu_1st in enumerate(sorted(items, key=lambda x: menu_1st_order.index(x))):
1091 menu_block.append(
1092 '<a href="#item{}" class="nav-group" data-toggle="collapse" data-parent="#MainMenu">{}</a>'
1093 .format(idx_1st, menu_1st)
1094 )
1095 menu_block.append('<div class="collapse" id="item{}">'.format(idx_1st))
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001096
kdanylov aka koder84de1e42017-05-22 14:00:07 +03001097 if menu_1st in (Menu1st.per_job, Menu1st.engineering_per_job):
1098 key = job_summ_sort_order.index
1099 elif menu_1st == Menu1st.engineering:
1100 key = Menu2ndEng.order.index
1101 elif menu_1st == Menu1st.summary:
1102 key = Menu2ndSumm.order.index
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001103 else:
kdanylov aka koder84de1e42017-05-22 14:00:07 +03001104 key = lambda x: x
1105
1106 in_order = sorted(items[menu_1st], key=key)
kdanylov aka koder3a9e5db2017-05-09 20:00:44 +03001107
1108 for menu_2nd in in_order:
koder aka kdanilov108ac362017-01-19 20:17:16 +02001109 menu_block.append(' <a href="#content{}" class="nav-group-item">{}</a>'
1110 .format(link_idx, menu_2nd))
1111 content_block.append('<div id="content{}">'.format(link_idx))
koder aka kdanilova732a602017-02-01 20:29:56 +02001112 content_block.extend(" " + x.data for x in items[menu_1st][menu_2nd])
koder aka kdanilov108ac362017-01-19 20:17:16 +02001113 content_block.append('</div>')
1114 link_idx += 1
1115 menu_block.append('</div>')
1116
1117 report = report_template.replace("{{{menu}}}", ("\n" + " " * 16).join(menu_block))
1118 report = report.replace("{{{content}}}", ("\n" + " " * 16).join(content_block))
kdanylov aka koderb0833332017-05-13 20:39:17 +03001119 report_path = ctx.rstorage.put_report(report, "index.html")
1120 ctx.rstorage.put_report(css_file, "main.css")
koder aka kdanilov108ac362017-01-19 20:17:16 +02001121 logger.info("Report is stored into %r", report_path)