blob: 2ccd9de438f9e7de330a3d0bc1e1a68ea749d05c [file] [log] [blame]
koder aka kdanilov6c491062015-04-09 22:33:13 +03001import re
2import json
3
4
5from disk_perf_test_tool.utils import ssize_to_b
6from disk_perf_test_tool.scripts.postprocessing import data_stat
7
8
def parse_output(out_err):
    """Extract and deserialize result blocks embedded in tool output.

    Scans *out_err* for delimited sections of the form::

        ==== RESULTS(format=json) ====
        ...payload...
        ==== END OF RESULTS ====

    first for ``json`` payloads, then for ``eval`` payloads, yielding each
    deserialized payload in that order.

    Parameters:
        out_err: combined stdout/stderr text of the tool run.

    Yields:
        One deserialized result object per block found.
    """
    end_patt = r"(?ims)=+\s+END OF RESULTS\s+=+"

    def iter_blocks(fmt, parse):
        # Shared scan for both payload formats (was duplicated verbatim).
        start_patt = r"(?ims)=+\s+RESULTS\(format=%s\)\s+=+" % fmt
        for block in re.split(start_patt, out_err)[1:]:
            # Cut at the FIRST end marker only. The previous 2-tuple unpack
            # of re.split() raised ValueError whenever another END marker
            # appeared later in the same text (e.g. when json and eval
            # blocks coexist in one output).
            payload = re.split(end_patt, block, maxsplit=1)[0]
            yield parse(payload.strip())

    for result in iter_blocks("json", json.loads):
        yield result

    # SECURITY NOTE: eval() executes arbitrary code from the captured
    # output. Kept for compatibility with existing "format=eval" payloads,
    # but the data source must be trusted.
    for result in iter_blocks("eval", eval):
        yield result
23
24
def filter_data(name_prefix, fields_to_select, **filters):
    """Build a selector over parsed result records.

    Returns a generator function which, given an iterable of result dicts,
    yields ``map(record.get, fields_to_select)`` for every record whose
    ``'jobname'`` starts with *name_prefix* (skipped when it is ``None``)
    and whose values match every ``key=value`` pair in *filters*.
    """
    def selector(records):
        for record in records:
            # Guard clause: enforce the job-name prefix first, if given.
            if name_prefix is not None and \
                    not record['jobname'].startswith(name_prefix):
                continue

            # Every extra filter must match exactly for the record to pass.
            if all(record.get(field) == expected
                   for field, expected in filters.items()):
                yield map(record.get, fields_to_select)

    return selector
38
39
def load_data(raw_data):
    """Yield per-job measurement dicts from the first parsed result block.

    Parses *raw_data* (raw captured tool output), takes the first
    deserialized block, and for every entry under its ``'res'`` mapping:

    * derives ``'blocksize'`` from the job key when the field is missing
      (assumes the key layout produced by the test-name generator --
      TODO confirm against the job naming code),
    * adds ``'blocksize_b'`` via ``ssize_to_b``,
    * adds median/stddev pairs for iops, bandwidth and latency.

    Each per-job dict is mutated in place and then yielded.
    """
    first_block = list(parse_output(raw_data))[0]

    for job_key, job in first_block['res'].items():
        if 'blocksize' not in job:
            # Fall back to decoding the block size out of the job key.
            job['blocksize'] = job_key.split('_')[2][3:].split('th')[0]

        job['blocksize_b'] = ssize_to_b(job['blocksize'])

        job['iops_mediana'], job['iops_stddev'] = \
            data_stat.med_dev(job['iops'])
        job['bw_mediana'], job['bw_stddev'] = \
            data_stat.med_dev(job['bw_mean'])
        job['lat_mediana'], job['lat_stddev'] = \
            data_stat.med_dev(job['lat'])

        yield job
54
55
def load_files(*fnames):
    """Load and yield job results from every file in *fnames*.

    Each file is read fully, parsed with load_data(), and the resulting
    per-job dicts are yielded in file order.

    Parameters:
        *fnames: paths of files containing captured tool output.

    Yields:
        Per-job result dicts, as produced by load_data().
    """
    for fname in fnames:
        # Read eagerly inside a context manager so the descriptor is
        # closed deterministically (the original leaked open file handles).
        with open(fname) as src:
            raw = src.read()
        for result in load_data(raw):
            yield result