blob: 7edff67e768b7b70666b18d28a71c337d4ef20fe [file] [log] [blame]
import math
import logging
import itertools
from typing import List, Callable, Iterable, Optional, Tuple, cast

import numpy
from scipy import stats, optimize
from numpy import linalg
from numpy.polynomial.chebyshev import chebfit, chebval


from .result_classes import NormStatProps, HistoStatProps, TimeSeries
from .utils import Number
koder aka kdanilovbb6d6cd2015-06-20 02:55:07 +030014
15
logger = logging.getLogger("wally")

# Small float tolerance for near-equality comparisons.
DOUBLE_DELTA = 1e-8

# t-based confidence intervals are not computed for smaller samples
# (see calc_norm_stat_props).
MIN_VALUES_FOR_CONFIDENCE = 7


# Arithmetic mean - module-level alias used by calc_norm_stat_props.
average = numpy.mean


def dev(x) -> float:
    """Sample standard deviation (ddof=1) of *x*."""
    # was a lambda assignment (PEP 8 E731); a def keeps the same callable name
    return math.sqrt(numpy.var(x, ddof=1))
koder aka kdanilov6c491062015-04-09 22:33:13 +030023
24
def calc_norm_stat_props(ts: TimeSeries, bins_count: int, confidence: float = 0.95) -> NormStatProps:
    """Compute normal-distribution statistics of ts.data.

    Fills a NormStatProps with mean, deviation, min/max, the standard
    percentiles, a t-based confidence interval (only when the sample has
    at least MIN_VALUES_FOR_CONFIDENCE points), a bins_count-bin
    histogram (left edges only), the normality-test result and
    skew/kurtosis.  Raises ValueError on empty input.
    """
    # array.array has very basic support, so work through a plain list
    data = cast(List[int], ts.data)
    res = NormStatProps(data)  # type: ignore

    if len(data) == 0:
        raise ValueError("Input array is empty")

    data = sorted(data)
    res.average = average(data)
    res.deviation = dev(data)
    res.min, res.max = data[0], data[-1]

    percentiles = numpy.percentile(data, q=[1.0, 5.0, 10., 50., 90., 95., 99.])
    (res.perc_1, res.perc_5, res.perc_10, res.perc_50,
     res.perc_90, res.perc_95, res.perc_99) = percentiles

    if len(data) < MIN_VALUES_FOR_CONFIDENCE:
        # too few points for a meaningful interval
        res.confidence = None
        res.confidence_level = None
    else:
        t_quantile = stats.t.ppf((1 + confidence) / 2, len(data) - 1)
        res.confidence = stats.sem(data) * t_quantile
        res.confidence_level = confidence

    populations, edges = numpy.histogram(data, bins=bins_count)
    res.bins_populations = populations
    res.bins_edges = edges[:-1]  # keep left edges only

    try:
        res.normtest = stats.mstats.normaltest(data)
    except Exception as exc:
        logger.warning("stats.mstats.normaltest failed with error: %s", exc)

    res.skew = stats.skew(data)
    res.kurt = stats.kurtosis(data)

    return res
65
66
def rebin_histogram(bins_populations: numpy.array,
                    bins_edges: numpy.array,
                    new_bins_count: int,
                    left_tail_idx: Optional[int] = None,
                    right_tail_idx: Optional[int] = None,
                    log_bins: bool = False) -> Tuple[numpy.array, numpy.array]:
    """Rebin a histogram into new_bins_count bins.

    bins_edges holds one (left) edge per source bin - same length as
    bins_populations.  The new bins span the edge range selected by
    left_tail_idx:right_tail_idx (whole range when None), spaced linearly
    or, with log_bins, geometrically.  Populations are distributed over
    the covered new bins proportionally to their width, assuming uniform
    density inside each source bin.  Returns (new_populations, new_edges).
    """
    assert len(bins_populations.shape) == 1
    assert len(bins_edges.shape) == 1
    assert bins_edges.shape[0] == bins_populations.shape[0]

    min_val = bins_edges[0] if left_tail_idx is None else bins_edges[left_tail_idx]
    max_val = bins_edges[-1] if right_tail_idx is None else bins_edges[right_tail_idx]

    if log_bins:
        assert min_val > 1E-3
        step = (max_val / min_val) ** (1 / new_bins_count)
        new_bins_edges = min_val * (step ** numpy.arange(new_bins_count))  # type: numpy.array
    else:
        new_bins_edges = numpy.linspace(min_val, max_val, new_bins_count + 1, dtype='float')[:-1]  # type: numpy.array

    old_bins_pos = numpy.searchsorted(new_bins_edges, bins_edges, side='right')
    new_bins = numpy.zeros(new_bins_count, dtype=int)  # type: numpy.array

    # last source bin can't be split - all of it goes to the last new bin
    # TODO: need to add assert for this
    new_bins[-1] += bins_populations[-1]
    bin_sizes = bins_edges[1:] - bins_edges[:-1]

    # correct position to get bin idx from edge idx
    old_bins_pos -= 1
    old_bins_pos[old_bins_pos < 0] = 0
    new_bins_sizes = new_bins_edges[1:] - new_bins_edges[:-1]

    for population, begin, end, bsize in zip(bins_populations[:-1], old_bins_pos[:-1], old_bins_pos[1:], bin_sizes):
        if begin == end:
            # source bin maps entirely into a single new bin
            new_bins[begin] += population
        else:
            # spread population over new bins [begin, end) by width
            density = population / bsize
            for curr_box in range(begin, end):
                # BUG FIX: both indices below used `begin` instead of
                # `curr_box`, dumping the whole population into the first
                # covered new bin (the loop variable was never used)
                cnt = min(int(new_bins_sizes[curr_box] * density + 0.5), population)
                new_bins[curr_box] += cnt
                population -= cnt

    return new_bins, new_bins_edges
122
123
def calc_histo_stat_props(ts: TimeSeries,
                          bins_edges: numpy.array,
                          rebins_count: int,
                          tail: float = 0.005) -> HistoStatProps:
    """Compute statistics of a histogram time series.

    ts.data is a 2D array - one row of bin populations per time step;
    bins_edges holds one edge per bin (same length as a row, as required
    by rebin_histogram).  Stores percentiles, min/max and a rebinned
    (rebins_count bins) histogram with `tail` fractions cut off each end
    into a HistoStatProps.
    """
    res = HistoStatProps(ts.data)

    # summ across all series
    aggregated = ts.data.sum(axis=0, dtype='int')
    total = aggregated.sum()

    # percentiles levels, expressed as cumulative counts
    expected = list(numpy.array([0.01, 0.05, 0.1, 0.5, 0.9, 0.95, 0.99]) * total)
    cumsum = numpy.cumsum(aggregated)

    percentiles_bins = numpy.searchsorted(cumsum, expected)
    percentiles = bins_edges[percentiles_bins]
    res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = percentiles

    # don't show tail ranges on histogram
    left_tail_idx, right_tail_idx = numpy.searchsorted(cumsum, [tail * total, (1 - tail) * total])

    # minimal and maximal non-zero bins
    non_zero = numpy.nonzero(aggregated)[0]
    # BUG FIX: was bins_edges[aggregated[non_zero[0]]] - indexed the edges
    # by a population *count* instead of the first non-empty bin index
    res.min = bins_edges[non_zero[0]]
    # BUG FIX: the guard compared against len(bins_edges), which is never
    # equal to a valid index, so +1 was always applied and could run past
    # the end of the array; compare against the last valid index instead
    res.max = bins_edges[non_zero[-1] + (1 if non_zero[-1] != len(bins_edges) - 1 else 0)]

    res.log_bins = False
    res.bins_populations, res.bins_edges = rebin_histogram(aggregated, bins_edges, rebins_count,
                                                           left_tail_idx, right_tail_idx)

    return res
156
157
def groupby_globally(data: Iterable, key_func: Callable):
    """Group *data* by key_func, merging non-adjacent runs.

    Unlike itertools.groupby alone, items with equal keys end up in the
    same bucket even when they are not consecutive in *data*.
    key_func must return a (bs, cache_tp, act, conc) 4-tuple.
    """
    grouped = {}  # type: ignore
    for raw_key, items in itertools.groupby(data, key_func):
        bs, cache_tp, act, conc = raw_key
        grouped.setdefault((bs, cache_tp, act, conc), []).extend(items)
    return grouped
167
168
def approximate_curve(x: List[Number], y: List[float], xnew: List[Number], curved_coef: int) -> List[float]:
    """Fit a degree-`curved_coef` Chebyshev polynomial to (x, y) and evaluate it at xnew."""
    coefs = chebfit(x, y, curved_coef)
    return cast(List[float], chebval(xnew, coefs))
koder aka kdanilov6c491062015-04-09 22:33:13 +0300172
173
def approximate_line(x: List[Number], y: List[float], xnew: List[Number], relative_dist: bool = False) -> List[float]:
    """Least-squares linear fit of (x, y), evaluated at the xnew points.

    When relative_dist is True the minimized residual is the relative
    error 1 - y/line(x); otherwise the absolute error y - line(x).
    Raises ValueError when the fit does not converge.
    """
    xs = numpy.array(x)
    ys = numpy.array(y)

    def line(coefs, vals):
        # coefs = (slope, intercept)
        return coefs[0] * vals + coefs[1]

    # choose distance mode
    if relative_dist:
        def residuals(coefs, vals, targets):
            return 1.0 - targets / line(coefs, vals)
    else:
        def residuals(coefs, vals, targets):
            return targets - line(coefs, vals)

    # initial guess: the exact line through the first two points
    start = tuple(linalg.solve([[xs[0], 1.0], [xs[1], 1.0]], ys[:2]))

    coefs, status = optimize.leastsq(residuals, start[:], args=(xs, ys))

    # leastsq reports success via status codes 1..4
    if status not in range(1, 5):
        raise ValueError("No line for this dots")

    return line(coefs, numpy.array(xnew))
koder aka kdanilov6c491062015-04-09 22:33:13 +0300208
209
def moving_average(data: numpy.array, window: int) -> numpy.array:
    """Rolling mean over `window` consecutive samples.

    Returns len(data) - window + 1 values, computed via a running
    cumulative sum (one pass, no per-window loop).
    """
    sums = numpy.cumsum(data)
    sums[window:] = sums[window:] - sums[:-window]
    return sums[window - 1:] / window
214
215
def moving_dev(data: numpy.array, window: int) -> numpy.array:
    """Rolling sample standard deviation (ddof=1) over `window`-sized windows.

    Uses running sums of x and x**2, so the whole series is processed in
    one pass; returns len(data) - window + 1 values.
    """
    s1 = numpy.cumsum(data)
    s2 = numpy.cumsum(data ** 2)
    s1[window:] = s1[window:] - s1[:-window]
    s2[window:] = s2[window:] - s2[:-window]
    variance = (s2[window - 1:] - s1[window - 1:] ** 2 / window) / (window - 1)
    return variance ** 0.5
222
223
def find_ouliers(data: numpy.array,
                 center_range: Tuple[int, int] = (25, 75),
                 cut_range: float = 3.0) -> numpy.array:
    """Boolean mask of outliers in *data*.

    A point is an outlier when it lies further than cut_range half-widths
    of the center_range percentile band from that band's midpoint.
    """
    low, high = numpy.percentile(data, center_range)
    center = (low + high) / 2
    half_width = (high - low) / 2
    return numpy.abs(data - center) > half_width * cut_range
229
230
def find_ouliers_ts(data: numpy.array,
                    windows_size: int = 30,
                    center_range: Tuple[int, int] = (25, 75),
                    cut_range: float = 3.0) -> numpy.array:
    # Boolean outlier mask for a time series, detecting outliers per
    # window of ~windows_size points (percentile bands drift over time,
    # so a global test would be wrong for long series).
    outliers = numpy.empty(data.shape, dtype=bool)

    # too short for even one window - report no outliers at all
    if len(data) < windows_size:
        outliers[:] = False
        return outliers

    begin_idx = 0
    if len(data) < windows_size * 2:
        # first chunk absorbs half of the remainder so the two chunks
        # are closer in size
        end_idx = (len(data) % windows_size) // 2 + windows_size
    else:
        # NOTE(review): this branch makes the first chunk cover the WHOLE
        # series (the loop below then runs exactly once), which defeats
        # the windowing for len(data) >= 2 * windows_size - looks
        # inverted/buggy, confirm intent before changing
        end_idx = len(data)

    while True:
        # classify the current chunk independently
        cdata = data[begin_idx: end_idx]
        outliers[begin_idx: end_idx] = find_ouliers(cdata, center_range, cut_range)
        begin_idx = end_idx

        if end_idx == len(data):
            break

        end_idx += windows_size
        # merge a trailing short remainder into the last full window
        if len(data) - end_idx < windows_size:
            end_idx = len(data)

    return outliers
260
261
def hist_outliers_nd(bin_populations: numpy.array,
                     bin_centers: numpy.array,
                     center_range: Tuple[int, int] = (25, 75),
                     cut_range: float = 3.0) -> Tuple[int, int]:
    """Outlier cut indices for a histogram.

    Locates the center_range percentile bins, then returns the
    (lower_idx, upper_idx) positions in bin_centers beyond which values
    lie further than cut_range half-band-widths from the band midpoint.
    """
    assert len(bin_populations) == len(bin_centers)
    total_count = bin_populations.sum()

    # percentile levels as cumulative counts
    target_counts = [total_count / 100.0 * level for level in center_range]
    low_idx, high_idx = numpy.searchsorted(numpy.cumsum(bin_populations), target_counts)

    middle = (bin_centers[high_idx] + bin_centers[low_idx]) / 2
    half_span = (bin_centers[high_idx] - bin_centers[low_idx]) / 2

    cut_bounds = [middle - half_span * cut_range, middle + half_span * cut_range]
    lower_cut_idx, upper_cut_idx = numpy.searchsorted(bin_centers, cut_bounds)
    return lower_cut_idx, upper_cut_idx
281
282
def hist_outliers_perc(bin_populations: numpy.array,
                       bounds_perc: Tuple[float, float] = (0.01, 0.99)) -> Tuple[int, int]:
    """Bin indices where the cumulative population crosses bounds_perc fractions of the total."""
    assert len(bin_populations.shape) == 1
    total = bin_populations.sum()
    target_counts = [total * fraction for fraction in bounds_perc]
    return numpy.searchsorted(numpy.cumsum(bin_populations), target_counts)
290
291
def ts_hist_outliers_perc(bin_populations: numpy.array,
                          window_size: int = 10,
                          bounds_perc: Tuple[float, float] = (0.01, 0.99)) -> Tuple[int, int]:
    """Percentile cut indices for a 2D histogram time series.

    Splits the rows into windows of window_size, computes
    hist_outliers_perc on each window's aggregated histogram and returns
    the widest (min lower, max upper) range over all windows.
    """
    assert len(bin_populations.shape) == 2

    boundaries = list(range(0, len(bin_populations), window_size))
    if len(bin_populations) % window_size != 0:
        # extend past the end so the trailing partial window is included
        boundaries.append(boundaries[-1] + window_size)

    per_window = [
        hist_outliers_perc(bin_populations[lo:hi].sum(axis=0), bounds_perc=bounds_perc)
        for lo, hi in zip(boundaries[:-1], boundaries[1:])
    ]

    return min(rng[0] for rng in per_window), max(rng[1] for rng in per_window)
307
308
koder aka kdanilov7f59d562016-12-26 01:34:23 +0200309# TODO: revise next
310# def difference(y, ynew):
311# """returns average and maximum relative and
312# absolute differences between y and ynew
313# result may contain None values for y = 0
314# return value - tuple:
315# [(abs dif, rel dif) * len(y)],
316# (abs average, abs max),
317# (rel average, rel max)"""
318#
319# abs_dlist = []
320# rel_dlist = []
321#
322# for y1, y2 in zip(y, ynew):
323# # absolute
324# abs_dlist.append(y1 - y2)
325#
326# if y1 > 1E-6:
327# rel_dlist.append(abs(abs_dlist[-1] / y1))
328# else:
329# raise ZeroDivisionError("{0!r} is too small".format(y1))
330#
331# da_avg = sum(abs_dlist) / len(abs_dlist)
332# dr_avg = sum(rel_dlist) / len(rel_dlist)
333#
334# return (zip(abs_dlist, rel_dlist),
335# (da_avg, max(abs_dlist)), (dr_avg, max(rel_dlist))
336# )