import math
import logging
import itertools
from typing import List, Callable, Iterable, cast, Tuple, Optional

import numpy
from scipy import stats, optimize
from numpy import linalg
from numpy.polynomial.chebyshev import chebfit, chebval


from .result_classes import NormStatProps, HistoStatProps, TimeSeries
from .utils import Number


logger = logging.getLogger("wally")
DOUBLE_DELTA = 1e-8
MIN_VALUES_FOR_CONFIDENCE = 7


average = numpy.mean
dev = lambda x: math.sqrt(numpy.var(x, ddof=1))

def calc_norm_stat_props(ts: TimeSeries, bins_count: Optional[int] = None, confidence: float = 0.95) -> NormStatProps:
    "Calculate statistical properties of an array of numbers"

    # array.array has very basic typing support, hence the cast
    data = cast(List[int], ts.data)
    res = NormStatProps(data)  # type: ignore

    if len(data) == 0:
        raise ValueError("Input array is empty")

    data = sorted(data)
    res.average = average(data)
    res.deviation = dev(data)

    res.max = data[-1]
    res.min = data[0]

    pcs = numpy.percentile(data, q=[1.0, 5.0, 10., 50., 90., 95., 99.])
    res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = pcs

    if len(data) >= MIN_VALUES_FOR_CONFIDENCE:
        res.confidence = stats.sem(data) * \
                         stats.t.ppf((1 + confidence) / 2, len(data) - 1)
        res.confidence_level = confidence
    else:
        res.confidence = None
        res.confidence_level = None

    if bins_count is not None:
        res.bins_populations, res.bins_edges = numpy.histogram(data, bins=bins_count)
        res.bins_edges = res.bins_edges[:-1]

    try:
        res.normtest = stats.mstats.normaltest(data)
    except Exception as exc:
        logger.warning("stats.mstats.normaltest failed with error: %s", exc)

    res.skew = stats.skew(data)
    res.kurt = stats.kurtosis(data)

    return res

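# Illustrative sketch (hypothetical helper, not used by wally): the confidence value above is
# the half-width of a two-sided Student-t interval for the mean,
# sem(data) * t.ppf((1 + confidence) / 2, n - 1).
def _example_confidence_half_width(samples: List[float], confidence: float = 0.95) -> float:
    return float(stats.sem(samples) * stats.t.ppf((1 + confidence) / 2, len(samples) - 1))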

# update this code
def rebin_histogram(bins_populations: numpy.array,
                    bins_edges: numpy.array,
                    new_bins_count: int,
                    left_tail_idx: Optional[int] = None,
                    right_tail_idx: Optional[int] = None,
                    log_bins: bool = False) -> Tuple[numpy.array, numpy.array]:
    # rebin a large histogram into a smaller one with new_bins_count bins,
    # spreading populations linearly (or logarithmically) across the
    # left_tail_idx:right_tail_idx range

    assert len(bins_populations.shape) == 1
    assert len(bins_edges.shape) == 1
    assert bins_edges.shape[0] == bins_populations.shape[0]

    if left_tail_idx is None:
        min_val = bins_edges[0]
    else:
        min_val = bins_edges[left_tail_idx]

    if right_tail_idx is None:
        max_val = bins_edges[-1]
    else:
        max_val = bins_edges[right_tail_idx]

    if log_bins:
        assert min_val > 1E-3
        step = (max_val / min_val) ** (1 / new_bins_count)
        new_bins_edges = min_val * (step ** numpy.arange(new_bins_count))  # type: numpy.array
    else:
        new_bins_edges = numpy.linspace(min_val, max_val, new_bins_count + 1, dtype='float')[:-1]  # type: numpy.array

    old_bins_pos = numpy.searchsorted(new_bins_edges, bins_edges, side='right')
    new_bins = numpy.zeros(new_bins_count, dtype=int)  # type: numpy.array

    # last source bin can't be split
    # TODO: need to add assert for this
    new_bins[-1] += bins_populations[-1]
    bin_sizes = bins_edges[1:] - bins_edges[:-1]

    # correct position to get bin idx from edge idx
    old_bins_pos -= 1
    old_bins_pos[old_bins_pos < 0] = 0
    new_bins_sizes = new_bins_edges[1:] - new_bins_edges[:-1]

    for population, begin, end, bsize in zip(bins_populations[:-1], old_bins_pos[:-1], old_bins_pos[1:], bin_sizes):
        if begin == end:
            new_bins[begin] += population
        else:
            # spread the source bin population across the new bins it overlaps,
            # proportionally to each new bin's width
            density = population / bsize
            for curr_box in range(begin, end):
                cnt = min(int(new_bins_sizes[curr_box] * density + 0.5), population)
                new_bins[curr_box] += cnt
                population -= cnt

    return new_bins, new_bins_edges

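# Illustrative sketch (hypothetical helper, not used by wally): rebinning the output of
# numpy.histogram. This module keeps only the left edges, so the final edge is dropped to
# match the population array length expected by rebin_histogram.
def _example_rebin() -> Tuple[numpy.array, numpy.array]:
    samples = numpy.random.lognormal(mean=3.0, sigma=0.5, size=10000)
    populations, edges = numpy.histogram(samples, bins=1000)
    return rebin_histogram(populations, edges[:-1], new_bins_count=20)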

def calc_histo_stat_props(ts: TimeSeries,
                          bins_edges: numpy.array,
                          rebins_count: Optional[int] = None,
                          tail: float = 0.005) -> HistoStatProps:
    "Calculate statistical properties of a histogram time series"
    res = HistoStatProps(ts.data)

    # sum across all series
    aggregated = ts.data.sum(axis=0, dtype='int')
    total = aggregated.sum()

    # percentile levels
    expected = list(numpy.array([0.01, 0.05, 0.1, 0.5, 0.9, 0.95, 0.99]) * total)
    cumsum = numpy.cumsum(aggregated)

    percentiles_bins = numpy.searchsorted(cumsum, expected)
    percentiles = bins_edges[percentiles_bins]
    res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = percentiles

    # don't show tail ranges on histogram
    left_tail_idx, right_tail_idx = numpy.searchsorted(cumsum, [tail * total, (1 - tail) * total])

    # minimal and maximal non-zero elements
    non_zero = numpy.nonzero(aggregated)[0]
    res.min = bins_edges[non_zero[0]]
    res.max = bins_edges[non_zero[-1] + (1 if non_zero[-1] != len(bins_edges) - 1 else 0)]

    res.log_bins = False
    if rebins_count is not None:
        res.bins_populations, res.bins_edges = rebin_histogram(aggregated, bins_edges, rebins_count,
                                                               left_tail_idx, right_tail_idx)
    else:
        res.bins_populations = aggregated
        res.bins_edges = bins_edges.copy()

    return res

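# Illustrative sketch (hypothetical helper, not used by wally): the percentile lookup from
# calc_histo_stat_props in standalone form - find the first bin whose cumulative population
# reaches the requested fraction of all samples and return that bin's left edge.
def _example_histo_percentile(populations: numpy.array, edges: numpy.array, level: float) -> float:
    idx = numpy.searchsorted(numpy.cumsum(populations), populations.sum() * level)
    return float(edges[idx])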

def groupby_globally(data: Iterable, key_func: Callable):
    grouped = {}  # type: ignore
    grouped_iter = itertools.groupby(data, key_func)

    for (bs, cache_tp, act, conc), curr_data_it in grouped_iter:
        key = (bs, cache_tp, act, conc)
        grouped.setdefault(key, []).extend(curr_data_it)

    return grouped

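# Illustrative sketch (hypothetical data, not used by wally): unlike plain itertools.groupby,
# groupby_globally merges non-adjacent records that share the same (bs, cache_tp, act, conc) key.
def _example_groupby() -> dict:
    records = [("4k", "wb", "randwrite", 1, 120),
               ("64k", "wt", "read", 8, 950),
               ("4k", "wb", "randwrite", 1, 130)]
    return groupby_globally(records, key_func=lambda rec: rec[:4])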

def approximate_curve(x: List[Number], y: List[float], xnew: List[Number], curved_coef: int) -> List[float]:
    """returns ynew - y values of some curve approximation"""
    return cast(List[float], chebval(xnew, chebfit(x, y, curved_coef)))


def approximate_line(x: List[Number], y: List[float], xnew: List[Number], relative_dist: bool = False) -> List[float]:
    """
    x, y - source data points; xnew - points where the approximation is evaluated
    if relative_dist is False the residual is y - ynew, otherwise it is 1 - y / ynew
    returns ynew - y values of the linear approximation
    """
    ox = numpy.array(x)
    oy = numpy.array(y)

    # approximation function
    def func_line(tpl, x):
        return tpl[0] * x + tpl[1]

    def error_func_rel(tpl, x, y):
        return 1.0 - y / func_line(tpl, x)

    def error_func_abs(tpl, x, y):
        return y - func_line(tpl, x)

    # choose distance mode
    error_func = error_func_rel if relative_dist else error_func_abs

    # initial guess - the line through the first two points
    tpl_initial = tuple(linalg.solve([[ox[0], 1.0], [ox[1], 1.0]],
                                     oy[:2]))

    # fit the line
    tpl_final, success = optimize.leastsq(error_func, tpl_initial[:], args=(ox, oy))

    # leastsq signals success via ier codes 1..4
    if success not in range(1, 5):
        raise ValueError("Can't fit a line to the given points")

    # evaluate the fitted line at the new points
    return func_line(tpl_final, numpy.array(xnew))


def moving_average(data: numpy.array, window: int) -> numpy.array:
    cumsum = numpy.cumsum(data)
    cumsum[window:] = cumsum[window:] - cumsum[:-window]
    return cumsum[window - 1:] / window


def moving_dev(data: numpy.array, window: int) -> numpy.array:
    cumsum = numpy.cumsum(data)
    cumsum2 = numpy.cumsum(data ** 2)
    cumsum[window:] = cumsum[window:] - cumsum[:-window]
    cumsum2[window:] = cumsum2[window:] - cumsum2[:-window]
    return ((cumsum2[window - 1:] - cumsum[window - 1:] ** 2 / window) / (window - 1)) ** 0.5

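# Illustrative sketch (hypothetical helper, not used by wally): for well-scaled float input the
# cumulative-sum based moving_average/moving_dev above should match a direct per-window
# computation (moving_dev uses the ddof=1 sample deviation).
def _example_check_moving_stats(data: numpy.array, window: int) -> bool:
    direct_avg = numpy.array([data[i: i + window].mean() for i in range(len(data) - window + 1)])
    direct_dev = numpy.array([data[i: i + window].std(ddof=1) for i in range(len(data) - window + 1)])
    return bool(numpy.allclose(moving_average(data, window), direct_avg) and
                numpy.allclose(moving_dev(data, window), direct_dev))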

def find_ouliers(data: numpy.array,
                 center_range: Tuple[int, int] = (25, 75),
                 cut_range: float = 3.0) -> numpy.array:
    v1, v2 = numpy.percentile(data, center_range)
    return numpy.abs(data - (v1 + v2) / 2) > ((v2 - v1) / 2 * cut_range)

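# Illustrative sketch (hypothetical helper, not used by wally): find_ouliers returns a boolean
# mask which is True for points further than cut_range * IQR / 2 from the center of the
# 25..75 percentile range, so the injected 50.0 below is expected to be flagged.
def _example_find_ouliers() -> numpy.array:
    data = numpy.concatenate([numpy.random.normal(loc=10.0, scale=1.0, size=100), [50.0]])
    return find_ouliers(data)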

def find_ouliers_ts(data: numpy.array,
                    windows_size: int = 30,
                    center_range: Tuple[int, int] = (25, 75),
                    cut_range: float = 3.0) -> numpy.array:
    outliers = numpy.empty(data.shape, dtype=bool)

    if len(data) < windows_size:
        outliers[:] = False
        return outliers

    begin_idx = 0
    if len(data) < windows_size * 2:
        end_idx = (len(data) % windows_size) // 2 + windows_size
    else:
        end_idx = len(data)

    while True:
        cdata = data[begin_idx: end_idx]
        outliers[begin_idx: end_idx] = find_ouliers(cdata, center_range, cut_range)
        begin_idx = end_idx

        if end_idx == len(data):
            break

        end_idx += windows_size
        if len(data) - end_idx < windows_size:
            end_idx = len(data)

    return outliers


def hist_outliers_nd(bin_populations: numpy.array,
                     bin_centers: numpy.array,
                     center_range: Tuple[int, int] = (25, 75),
                     cut_range: float = 3.0) -> Tuple[int, int]:
    assert len(bin_populations) == len(bin_centers)
    total_count = bin_populations.sum()

    perc25 = total_count / 100.0 * center_range[0]
    perc75 = total_count / 100.0 * center_range[1]

    perc25_idx, perc75_idx = numpy.searchsorted(numpy.cumsum(bin_populations), [perc25, perc75])
    middle = (bin_centers[perc75_idx] + bin_centers[perc25_idx]) / 2
    r = (bin_centers[perc75_idx] - bin_centers[perc25_idx]) / 2

    lower_bound = middle - r * cut_range
    upper_bound = middle + r * cut_range

    lower_cut_idx, upper_cut_idx = numpy.searchsorted(bin_centers, [lower_bound, upper_bound])
    return lower_cut_idx, upper_cut_idx


def hist_outliers_perc(bin_populations: numpy.array,
                       bounds_perc: Tuple[float, float] = (0.01, 0.99)) -> Tuple[int, int]:
    assert len(bin_populations.shape) == 1
    total_count = bin_populations.sum()
    lower_perc = total_count * bounds_perc[0]
    upper_perc = total_count * bounds_perc[1]
    return numpy.searchsorted(numpy.cumsum(bin_populations), [lower_perc, upper_perc])

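# Illustrative sketch (hypothetical helper, not used by wally): trimming the 1% tails of a
# histogram (populations plus matching left edges) before plotting.
def _example_trim_histogram(populations: numpy.array,
                            edges: numpy.array) -> Tuple[numpy.array, numpy.array]:
    lower_idx, upper_idx = hist_outliers_perc(populations, bounds_perc=(0.01, 0.99))
    return populations[lower_idx: upper_idx], edges[lower_idx: upper_idx]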

def ts_hist_outliers_perc(bin_populations: numpy.array,
                          window_size: int = 10,
                          bounds_perc: Tuple[float, float] = (0.01, 0.99)) -> Tuple[int, int]:
    assert len(bin_populations.shape) == 2

    points = list(range(0, len(bin_populations), window_size))
    if len(bin_populations) % window_size != 0:
        points.append(points[-1] + window_size)

    ranges = []
    for begin, end in zip(points[:-1], points[1:]):
        window_hist = bin_populations[begin:end].sum(axis=0)
        ranges.append(hist_outliers_perc(window_hist, bounds_perc=bounds_perc))

    return min(i[0] for i in ranges), max(i[1] for i in ranges)

# TODO: revise next
# def difference(y, ynew):
#     """returns average and maximum relative and
#     absolute differences between y and ynew
#     result may contain None values for y = 0
#     return value - tuple:
#     [(abs dif, rel dif) * len(y)],
#     (abs average, abs max),
#     (rel average, rel max)"""
#
#     abs_dlist = []
#     rel_dlist = []
#
#     for y1, y2 in zip(y, ynew):
#         # absolute
#         abs_dlist.append(y1 - y2)
#
#         if y1 > 1E-6:
#             rel_dlist.append(abs(abs_dlist[-1] / y1))
#         else:
#             raise ZeroDivisionError("{0!r} is too small".format(y1))
#
#     da_avg = sum(abs_dlist) / len(abs_dlist)
#     dr_avg = sum(rel_dlist) / len(rel_dlist)
#
#     return (zip(abs_dlist, rel_dlist),
#             (da_avg, max(abs_dlist)), (dr_avg, max(rel_dlist))
#             )