import math
import logging
import itertools
from typing import List, Callable, Iterable, Optional, cast, Tuple

import numpy
from scipy import stats, optimize
from numpy import linalg
from numpy.polynomial.chebyshev import chebfit, chebval


from .result_classes import NormStatProps, HistoStatProps, TimeSeries
from .utils import Number
koder aka kdanilov | bb6d6cd | 2015-06-20 02:55:07 +0300 | [diff] [blame] | 14 | |
| 15 | |
koder aka kdanilov | ffaf48d | 2016-12-27 02:25:29 +0200 | [diff] [blame] | 16 | logger = logging.getLogger("wally") |
koder aka kdanilov | 7f59d56 | 2016-12-26 01:34:23 +0200 | [diff] [blame] | 17 | DOUBLE_DELTA = 1e-8 |
koder aka kdanilov | 108ac36 | 2017-01-19 20:17:16 +0200 | [diff] [blame] | 18 | MIN_VALUES_FOR_CONFIDENCE = 7 |
koder aka kdanilov | e87ae65 | 2015-04-20 02:14:35 +0300 | [diff] [blame] | 19 | |
| 20 | |
average = numpy.mean


def dev(arr) -> float:
    """Sample standard deviation (ddof=1) of *arr*."""
    return math.sqrt(numpy.var(arr, ddof=1))
koder aka kdanilov | 6c49106 | 2015-04-09 22:33:13 +0300 | [diff] [blame] | 23 | |
| 24 | |
def calc_norm_stat_props(ts: TimeSeries, bins_count: int, confidence: float = 0.95) -> NormStatProps:
    """Calculate normal-distribution oriented statistics for ts.data.

    Fills average, deviation, min/max, fixed percentiles, a confidence
    interval (only when enough samples are available), a histogram with
    left bin edges, a normality test result, skew and kurtosis.

    Raises ValueError for empty input.
    """

    # array.array has very basic support
    data = cast(List[int], ts.data)

    # validate before constructing the result object
    if len(data) == 0:
        raise ValueError("Input array is empty")

    res = NormStatProps(data)  # type: ignore

    data = sorted(data)
    res.average = average(data)
    res.deviation = dev(data)

    # data is sorted, so min/max are the ends
    res.max = data[-1]
    res.min = data[0]

    pcs = numpy.percentile(data, q=[1.0, 5.0, 10., 50., 90., 95., 99.])
    res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = pcs

    # a confidence interval over too few samples is meaningless
    if len(data) >= MIN_VALUES_FOR_CONFIDENCE:
        res.confidence = stats.sem(data) * \
                         stats.t.ppf((1 + confidence) / 2, len(data) - 1)
        res.confidence_level = confidence
    else:
        res.confidence = None
        res.confidence_level = None

    # keep only left edges, so edges and populations have equal length
    res.bins_populations, res.bins_edges = numpy.histogram(data, bins=bins_count)
    res.bins_edges = res.bins_edges[:-1]

    # make sure the attribute is defined even when the test fails
    res.normtest = None
    try:
        res.normtest = stats.mstats.normaltest(data)
    except Exception as exc:
        logger.warning("stats.mstats.normaltest failed with error: %s", exc)

    res.skew = stats.skew(data)
    res.kurt = stats.kurtosis(data)

    return res
| 65 | |
| 66 | |
koder aka kdanilov | a732a60 | 2017-02-01 20:29:56 +0200 | [diff] [blame] | 67 | # update this code |
| 68 | def rebin_histogram(bins_populations: numpy.array, |
| 69 | bins_edges: numpy.array, |
| 70 | new_bins_count: int, |
| 71 | left_tail_idx: int = None, |
| 72 | right_tail_idx: int = None, |
| 73 | log_bins: bool = False) -> Tuple[numpy.array, numpy.array]: |
| 74 | # rebin large histogram into smaller with new_bins bins, linearly distributes across |
| 75 | # left_tail_idx:right_tail_idx range |
| 76 | |
| 77 | assert len(bins_populations.shape) == 1 |
| 78 | assert len(bins_edges.shape) == 1 |
| 79 | assert bins_edges.shape[0] == bins_populations.shape[0] |
| 80 | |
| 81 | if left_tail_idx is None: |
| 82 | min_val = bins_edges[0] |
| 83 | else: |
| 84 | min_val = bins_edges[left_tail_idx] |
| 85 | |
| 86 | if right_tail_idx is None: |
| 87 | max_val = bins_edges[-1] |
| 88 | else: |
| 89 | max_val = bins_edges[right_tail_idx] |
| 90 | |
| 91 | if log_bins: |
| 92 | assert min_val > 1E-3 |
| 93 | step = (max_val / min_val) ** (1 / new_bins_count) |
| 94 | new_bins_edges = min_val * (step ** numpy.arange(new_bins_count)) # type: numpy.array |
| 95 | else: |
| 96 | new_bins_edges = numpy.linspace(min_val, max_val, new_bins_count + 1, dtype='float')[:-1] # type: numpy.array |
| 97 | |
| 98 | old_bins_pos = numpy.searchsorted(new_bins_edges, bins_edges, side='right') |
| 99 | new_bins = numpy.zeros(new_bins_count, dtype=int) # type: numpy.array |
| 100 | |
| 101 | # last source bin can't be split |
| 102 | # TODO: need to add assert for this |
| 103 | new_bins[-1] += bins_populations[-1] |
| 104 | bin_sizes = bins_edges[1:] - bins_edges[:-1] |
| 105 | |
| 106 | # correct position to get bin idx from edge idx |
| 107 | old_bins_pos -= 1 |
| 108 | old_bins_pos[old_bins_pos < 0] = 0 |
| 109 | new_bins_sizes = new_bins_edges[1:] - new_bins_edges[:-1] |
| 110 | |
| 111 | for population, begin, end, bsize in zip(bins_populations[:-1], old_bins_pos[:-1], old_bins_pos[1:], bin_sizes): |
| 112 | if begin == end: |
| 113 | new_bins[begin] += population |
| 114 | else: |
| 115 | density = population / bsize |
| 116 | for curr_box in range(begin, end): |
| 117 | cnt = min(int(new_bins_sizes[begin] * density + 0.5), population) |
| 118 | new_bins[begin] += cnt |
| 119 | population -= cnt |
| 120 | |
| 121 | return new_bins, new_bins_edges |
| 122 | |
| 123 | |
def calc_histo_stat_props(ts: TimeSeries,
                          bins_edges: numpy.array,
                          rebins_count: int,
                          tail: float = 0.005) -> HistoStatProps:
    """Calculate statistics for a 2D histogram time series.

    ts.data rows are per-time histograms over the bins described by
    bins_edges (left edges).  The rows are summed, percentiles are taken
    from the aggregated histogram, and it is rebinned to rebins_count
    bins with a `tail` fraction cut from each side.
    """
    res = HistoStatProps(ts.data)

    # summ across all series
    aggregated = ts.data.sum(axis=0, dtype='int')
    total = aggregated.sum()

    # percentiles levels
    expected = list(numpy.array([0.01, 0.05, 0.1, 0.5, 0.9, 0.95, 0.99]) * total)
    cumsum = numpy.cumsum(aggregated)

    percentiles_bins = numpy.searchsorted(cumsum, expected)
    percentiles = bins_edges[percentiles_bins]
    res.perc_1, res.perc_5, res.perc_10, res.perc_50, res.perc_90, res.perc_95, res.perc_99 = percentiles

    # don't show tail ranges on histogram
    left_tail_idx, right_tail_idx = numpy.searchsorted(cumsum, [tail * total, (1 - tail) * total])

    # minimal and maximal non-zero elements
    non_zero = numpy.nonzero(aggregated)[0]
    # BUG FIX: was bins_edges[aggregated[non_zero[0]]] - indexed edges by
    # a population count instead of the first non-empty bin index
    res.min = bins_edges[non_zero[0]]
    # BUG FIX: the guard compared against len(bins_edges), which an index
    # can never equal, so a populated last bin indexed out of range
    res.max = bins_edges[non_zero[-1] + (1 if non_zero[-1] != len(bins_edges) - 1 else 0)]

    res.log_bins = False
    res.bins_populations, res.bins_edges = rebin_histogram(aggregated, bins_edges, rebins_count,
                                                           left_tail_idx, right_tail_idx)

    return res
| 156 | |
| 157 | |
def groupby_globally(data: Iterable, key_func: Callable):
    """Group data items by key_func(item), merging non-adjacent runs.

    Unlike plain itertools.groupby, items with equal keys do not need to
    be adjacent: every item with the same key lands in one list.
    Generalized to accept any hashable key, not only 4-tuples.
    Returns {key: [items]}.
    """
    grouped = {}  # type: ignore
    for key, items_it in itertools.groupby(data, key_func):
        grouped.setdefault(key, []).extend(items_it)
    return grouped
| 167 | |
| 168 | |
def approximate_curve(x: List[Number], y: List[float], xnew: List[Number], curved_coef: int) -> List[float]:
    """Fit a Chebyshev polynomial of degree curved_coef to (x, y) and
    return its values at the xnew points."""
    coefs = chebfit(x, y, curved_coef)
    return cast(List[float], chebval(xnew, coefs))
koder aka kdanilov | 6c49106 | 2015-04-09 22:33:13 +0300 | [diff] [blame] | 172 | |
| 173 | |
def approximate_line(x: List[Number], y: List[float], xnew: List[Number], relative_dist: bool = False) -> List[float]:
    """Least-squares line fit through (x, y), evaluated at the xnew points.

    When relative_dist is True the residual is 1 - y/line(x), otherwise
    the plain difference y - line(x).  Raises ValueError when the
    optimizer does not converge.
    """
    xs = numpy.array(x)
    ys = numpy.array(y)

    def line(coefs, vals):
        # y = a * x + b
        return coefs[0] * vals + coefs[1]

    def resid_rel(coefs, vals, targets):
        return 1.0 - targets / line(coefs, vals)

    def resid_abs(coefs, vals, targets):
        return targets - line(coefs, vals)

    # pick the residual flavour requested by the caller
    if relative_dist:
        residual = resid_rel
    else:
        residual = resid_abs

    # initial guess: the exact line through the first two points
    initial = tuple(linalg.solve([[xs[0], 1.0], [xs[1], 1.0]], ys[:2]))

    final, success = optimize.leastsq(residual, initial[:], args=(xs, ys))

    # leastsq signals success with return codes 1..4
    if success not in range(1, 5):
        raise ValueError("No line for this dots")

    return line(final, numpy.array(xnew))
koder aka kdanilov | 6c49106 | 2015-04-09 22:33:13 +0300 | [diff] [blame] | 208 | |
| 209 | |
def moving_average(data: numpy.array, window: int) -> numpy.array:
    """Simple moving average of data over a sliding window.

    Computed from running sums; returns len(data) - window + 1 points.
    """
    sums = numpy.cumsum(data)
    # turn the prefix sums into per-window sums
    sums[window:] = sums[window:] - sums[:-window]
    return sums[window - 1:] / window
| 214 | |
| 215 | |
def moving_dev(data: numpy.array, window: int) -> numpy.array:
    """Moving sample standard deviation (ddof=1) over a sliding window.

    Computed from running sums of values and squared values; returns
    len(data) - window + 1 points.
    """
    sums = numpy.cumsum(data)
    sq_sums = numpy.cumsum(data ** 2)
    # turn prefix sums into per-window sums
    sums[window:] = sums[window:] - sums[:-window]
    sq_sums[window:] = sq_sums[window:] - sq_sums[:-window]
    # sample variance: (sum(x^2) - sum(x)^2 / n) / (n - 1)
    return ((sq_sums[window - 1:] - sums[window - 1:] ** 2 / window) / (window - 1)) ** 0.5
| 222 | |
| 223 | |
def find_ouliers(data: numpy.array,
                 center_range: Tuple[int, int] = (25, 75),
                 cut_range: float = 3.0) -> numpy.array:
    """Return a boolean mask marking outliers in data.

    A point is an outlier when its distance from the middle of the
    center_range percentile interval exceeds cut_range times the
    interval half-width.
    """
    low, high = numpy.percentile(data, center_range)
    center = (low + high) / 2
    radius = (high - low) / 2
    return numpy.abs(data - center) > radius * cut_range
| 229 | |
| 230 | |
def find_ouliers_ts(data: numpy.array,
                    windows_size: int = 30,
                    center_range: Tuple[int, int] = (25, 75),
                    cut_range: float = 3.0) -> numpy.array:
    """Return a boolean outlier mask for data, computed chunk by chunk.

    Each chunk is classified independently via find_ouliers with the given
    center_range / cut_range, so the threshold adapts to the local value
    distribution.  Series shorter than windows_size yield an all-False mask.
    """
    outliers = numpy.empty(data.shape, dtype=bool)

    # too short for even one window - nothing is classified as an outlier
    if len(data) < windows_size:
        outliers[:] = False
        return outliers

    begin_idx = 0
    if len(data) < windows_size * 2:
        # one full window plus a surplus: first chunk takes windows_size
        # plus half the surplus, the short remainder forms a second chunk
        end_idx = (len(data) % windows_size) // 2 + windows_size
    else:
        # NOTE(review): this makes the first chunk span the *entire* series
        # whenever len(data) >= 2 * windows_size, so no windowing happens
        # at all for long inputs - possibly 'end_idx = windows_size' was
        # intended; confirm before changing
        end_idx = len(data)

    while True:
        cdata = data[begin_idx: end_idx]
        outliers[begin_idx: end_idx] = find_ouliers(cdata, center_range, cut_range)
        begin_idx = end_idx

        # done once the chunk reached the end of the series
        if end_idx == len(data):
            break

        # advance one window; merge a too-short tail into this chunk
        end_idx += windows_size
        if len(data) - end_idx < windows_size:
            end_idx = len(data)

    return outliers
| 260 | |
| 261 | |
def hist_outliers_nd(bin_populations: numpy.array,
                     bin_centers: numpy.array,
                     center_range: Tuple[int, int] = (25, 75),
                     cut_range: float = 3.0) -> Tuple[int, int]:
    """Return (lower_idx, upper_idx) bin indices that cut off histogram
    outliers.

    The bounds lie cut_range half-widths of the center_range percentile
    interval away from that interval's middle.
    """
    assert len(bin_populations) == len(bin_centers)
    total_count = bin_populations.sum()

    # absolute sample counts matching the requested percentile levels
    low_count = total_count / 100.0 * center_range[0]
    high_count = total_count / 100.0 * center_range[1]
    low_idx, high_idx = numpy.searchsorted(numpy.cumsum(bin_populations), [low_count, high_count])

    center = (bin_centers[high_idx] + bin_centers[low_idx]) / 2
    radius = (bin_centers[high_idx] - bin_centers[low_idx]) / 2

    bounds = [center - radius * cut_range, center + radius * cut_range]
    lower_cut_idx, upper_cut_idx = numpy.searchsorted(bin_centers, bounds)
    return lower_cut_idx, upper_cut_idx
| 281 | |
| 282 | |
def hist_outliers_perc(bin_populations: numpy.array,
                       bounds_perc: Tuple[float, float] = (0.01, 0.99)) -> Tuple[int, int]:
    """Return (lower_idx, upper_idx) bin indices between which the
    bounds_perc fraction of all samples lies."""
    assert len(bin_populations.shape) == 1
    total = bin_populations.sum()
    # convert the fractional bounds to absolute sample counts
    thresholds = [total * frac for frac in bounds_perc]
    return numpy.searchsorted(numpy.cumsum(bin_populations), thresholds)
| 290 | |
| 291 | |
def ts_hist_outliers_perc(bin_populations: numpy.array,
                          window_size: int = 10,
                          bounds_perc: Tuple[float, float] = (0.01, 0.99)) -> Tuple[int, int]:
    """Percentile-based outlier bin bounds for a 2D histogram time series.

    Splits the rows into window_size-sized chunks, computes
    hist_outliers_perc per chunk, and returns the widest
    (min lower, max upper) bound pair across all chunks.
    """
    assert len(bin_populations.shape) == 2

    # chunk start offsets plus the closing boundary
    points = list(range(0, len(bin_populations), window_size))
    # BUG FIX: the closing boundary was appended only when
    # len % window_size != 0, so evenly-divisible lengths dropped the
    # last window (and len == window_size crashed on min() of an empty
    # sequence); appending len() unconditionally covers every row
    points.append(len(bin_populations))

    ranges = []
    for begin, end in zip(points[:-1], points[1:]):
        window_hist = bin_populations[begin:end].sum(axis=0)
        ranges.append(hist_outliers_perc(window_hist, bounds_perc=bounds_perc))

    return min(i[0] for i in ranges), max(i[1] for i in ranges)
| 307 | |
| 308 | |
koder aka kdanilov | 7f59d56 | 2016-12-26 01:34:23 +0200 | [diff] [blame] | 309 | # TODO: revise next |
| 310 | # def difference(y, ynew): |
| 311 | # """returns average and maximum relative and |
| 312 | # absolute differences between y and ynew |
| 313 | # result may contain None values for y = 0 |
| 314 | # return value - tuple: |
| 315 | # [(abs dif, rel dif) * len(y)], |
| 316 | # (abs average, abs max), |
| 317 | # (rel average, rel max)""" |
| 318 | # |
| 319 | # abs_dlist = [] |
| 320 | # rel_dlist = [] |
| 321 | # |
| 322 | # for y1, y2 in zip(y, ynew): |
| 323 | # # absolute |
| 324 | # abs_dlist.append(y1 - y2) |
| 325 | # |
| 326 | # if y1 > 1E-6: |
| 327 | # rel_dlist.append(abs(abs_dlist[-1] / y1)) |
| 328 | # else: |
| 329 | # raise ZeroDivisionError("{0!r} is too small".format(y1)) |
| 330 | # |
| 331 | # da_avg = sum(abs_dlist) / len(abs_dlist) |
| 332 | # dr_avg = sum(rel_dlist) / len(rel_dlist) |
| 333 | # |
| 334 | # return (zip(abs_dlist, rel_dlist), |
| 335 | # (da_avg, max(abs_dlist)), (dr_avg, max(rel_dlist)) |
| 336 | # ) |