koder aka kdanilov | 0fdaaee | 2015-06-30 11:10:48 +0300 | [diff] [blame] | 1 | import os |
| 2 | import sys |
| 3 | import time |
| 4 | import signal |
| 5 | import logging |
| 6 | import argparse |
| 7 | import functools |
koder aka kdanilov | 0fdaaee | 2015-06-30 11:10:48 +0300 | [diff] [blame] | 8 | |
| 9 | from yaml import load as _yaml_load |
| 10 | |
| 11 | try: |
| 12 | from yaml import CLoader |
| 13 | yaml_load = functools.partial(_yaml_load, Loader=CLoader) |
| 14 | except ImportError: |
| 15 | yaml_load = _yaml_load |
| 16 | |
| 17 | |
| 18 | import texttable |
| 19 | |
| 20 | try: |
| 21 | import faulthandler |
| 22 | except ImportError: |
| 23 | faulthandler = None |
| 24 | |
| 25 | |
koder aka kdanilov | 3b4da8b | 2016-10-17 00:17:53 +0300 | [diff] [blame^] | 26 | from .timeseries import SensorDatastore |
| 27 | from . import utils, run_test, pretty_yaml |
| 28 | from .config import (load_config, |
| 29 | get_test_files, save_run_params, load_run_params) |
| 30 | from .logger import setup_loggers |
| 31 | from .stage import log_stage |
koder aka kdanilov | 0fdaaee | 2015-06-30 11:10:48 +0300 | [diff] [blame] | 32 | |
| 33 | |
| 34 | logger = logging.getLogger("wally") |
| 35 | |
| 36 | |
koder aka kdanilov | 0fdaaee | 2015-06-30 11:10:48 +0300 | [diff] [blame] | 37 | def get_test_names(raw_res): |
| 38 | res = [] |
| 39 | for tp, data in raw_res: |
| 40 | if not isinstance(data, list): |
| 41 | raise ValueError() |
| 42 | |
| 43 | keys = [] |
| 44 | for dt in data: |
| 45 | if not isinstance(dt, dict): |
| 46 | raise ValueError() |
| 47 | |
| 48 | keys.append(",".join(dt.keys())) |
| 49 | |
| 50 | res.append(tp + "(" + ",".join(keys) + ")") |
| 51 | return res |
| 52 | |
| 53 | |
def list_results(path):
    """Print a table of all test results found in subdirectories of *path*.

    Each subdirectory is inspected via get_test_files(); directories
    without a 'raw_results' file, or with malformed contents, are skipped.
    Output columns: Name, Tests, etime (raw_results mtime), Comment.
    """
    results = []

    for dname in os.listdir(path):
        try:
            files_cfg = get_test_files(os.path.join(path, dname))

            if not os.path.isfile(files_cfg['raw_results']):
                continue

            mt = os.path.getmtime(files_cfg['raw_results'])
            res_mtime = time.ctime(mt)

            # context manager so the handle is closed even if parsing
            # raises (the original leaked the file object)
            with open(files_cfg['raw_results']) as fd:
                raw_res = yaml_load(fd.read())
            test_names = ",".join(sorted(get_test_names(raw_res)))

            params = load_run_params(files_cfg['run_params_file'])

            comm = params.get('comment')
            # first tuple field (mt) is only used as the sort key below
            results.append((mt, dname, test_names, res_mtime,
                            '-' if comm is None else comm))
        except ValueError:
            # best-effort listing: skip broken/partial result dirs,
            # but leave a trace instead of failing silently
            logger.debug("Skipping malformed result dir %r", dname)

    tab = texttable.Texttable(max_width=200)
    tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
    tab.set_cols_align(["l", "l", "l", "l"])
    results.sort()

    # newest first; drop the mtime sort key from the displayed row
    for data in results[::-1]:
        tab.add_row(data[1:])

    tab.header(["Name", "Tests", "etime", "Comment"])

    print(tab.draw())
| 89 | |
| 90 | |
koder aka kdanilov | 0fdaaee | 2015-06-30 11:10:48 +0300 | [diff] [blame] | 91 | def make_storage_dir_struct(cfg): |
| 92 | utils.mkdirs_if_unxists(cfg.results_dir) |
| 93 | utils.mkdirs_if_unxists(cfg.sensor_storage) |
| 94 | utils.mkdirs_if_unxists(cfg.hwinfo_directory) |
| 95 | utils.mkdirs_if_unxists(cfg.results_storage) |
| 96 | |
| 97 | |
def log_nodes_statistic_stage(_, ctx):
    """Stage adapter: log statistics about the nodes in ``ctx.nodes``.

    The first parameter is the config object, unused here; it exists only
    so this function matches the ``(cfg, ctx)`` signature that every stage
    is invoked with in main().
    """
    utils.log_nodes_statistic(ctx.nodes)
| 100 | |
| 101 | |
def parse_args(argv):
    """Parse command-line options.

    *argv* is the full argument vector; argv[0] (the program name) is
    dropped before parsing.  Returns the argparse namespace; the selected
    subcommand (if any) is available as ``subparser_name``.
    """
    parser = argparse.ArgumentParser(
        prog='wally',
        description="Disk io performance test suite")
    parser.add_argument("-l", '--log-level',
                        help="print some extra log info")

    subparsers = parser.add_subparsers(dest='subparser_name')

    # --- 'ls': list previously stored results ----------------------------
    ls_parser = subparsers.add_parser('ls', help='list all results')
    ls_parser.add_argument("result_storage", help="Folder with test results")

    # --- 'compare': diff two stored results ------------------------------
    cmp_parser = subparsers.add_parser('compare', help='compare two results')
    cmp_parser.add_argument("data_path1", help="First folder with test results")
    cmp_parser.add_argument("data_path2", help="Second folder with test results")

    # --- 'report': rebuild reports from stored results -------------------
    report_parser = subparsers.add_parser(
        'report', help='run report on previously obtained results')
    report_parser.add_argument('--load_report', action='store_true')
    report_parser.add_argument("data_dir", help="folder with rest results")

    # --- 'test': run the test suite --------------------------------------
    test_parser = subparsers.add_parser('test', help='run tests')
    test_parser.add_argument('--build-description',
                             type=str, default="Build info")
    test_parser.add_argument('--build-id', type=str, default="id")
    test_parser.add_argument('--build-type', type=str, default="GA")
    test_parser.add_argument('-n', '--no-tests', action='store_true',
                             help="Don't run tests", default=False)
    test_parser.add_argument('--load_report', action='store_true')
    test_parser.add_argument("-k", '--keep-vm', action='store_true',
                             help="Don't remove test vm's", default=False)
    test_parser.add_argument("-d", '--dont-discover-nodes', action='store_true',
                             help="Don't connect/discover fuel nodes",
                             default=False)
    test_parser.add_argument('--no-report', action='store_true',
                             help="Skip report stages", default=False)
    test_parser.add_argument("comment", help="Test information")
    test_parser.add_argument("config_file", help="Yaml config file")

    return parser.parse_args(argv[1:])
| 149 | |
| 150 | |
def main(argv):
    """CLI entry point: dispatch on the chosen subcommand and run stages.

    Builds a list of test stages and report stages based on the parsed
    options, runs them in order against (cfg, ctx), then runs cleanup
    callbacks and reports.  Returns 0 on success, 1 if a stage failed.
    """
    # Allow dumping all thread stacks with SIGUSR1 when faulthandler is
    # available (it is optional, see the guarded import at module top).
    if faulthandler is not None:
        faulthandler.register(signal.SIGUSR1, all_threads=True)

    opts = parse_args(argv)
    stages = []
    report_stages = []

    # NOTE(review): Context is not imported anywhere in this file's
    # visible import block - confirm where it is expected to come from,
    # otherwise this line raises NameError.
    ctx = Context()
    ctx.results = {}
    ctx.sensors_data = SensorDatastore()

    if opts.subparser_name == 'test':
        cfg = load_config(opts.config_file)
        make_storage_dir_struct(cfg)
        cfg.comment = opts.comment
        save_run_params(cfg)

        # persist the effective config so 'report' can reload it later
        with open(cfg.saved_config_file, 'w') as fd:
            fd.write(pretty_yaml.dumps(cfg.__dict__))

        stages = [
            run_test.discover_stage
        ]

        stages.extend([
            run_test.reuse_vms_stage,
            log_nodes_statistic_stage,
            run_test.save_nodes_stage,
            run_test.connect_stage])

        # hw info collection can be disabled from the config file
        if cfg.settings.get('collect_info', True):
            stages.append(run_test.collect_hw_info_stage)

        stages.extend([
            # deploy_sensors_stage,
            run_test.run_tests_stage,
            run_test.store_raw_results_stage,
            # gather_sensors_stage
        ])

        # mirror selected CLI flags onto the config object for the stages
        cfg.keep_vm = opts.keep_vm
        cfg.no_tests = opts.no_tests
        cfg.dont_discover_nodes = opts.dont_discover_nodes

        # NOTE(review): 'build_descrption' key is misspelled; fixing it
        # requires checking every consumer of build_meta first.
        ctx.build_meta['build_id'] = opts.build_id
        ctx.build_meta['build_descrption'] = opts.build_description
        ctx.build_meta['build_type'] = opts.build_type

    elif opts.subparser_name == 'ls':
        # 'ls' is fully handled here - no stages, no cfg needed
        list_results(opts.result_storage)
        return 0

    elif opts.subparser_name == 'report':
        cfg = load_config(get_test_files(opts.data_dir)['saved_config_file'])
        stages.append(run_test.load_data_from(opts.data_dir))
        # force report generation: the 'report' subparser has no
        # --no-report flag, so provide the attribute explicitly
        opts.no_report = False
        # load build meta

    elif opts.subparser_name == 'compare':
        # 'compare' is also fully handled here and exits early
        x = run_test.load_data_from_path(opts.data_path1)
        y = run_test.load_data_from_path(opts.data_path2)
        print(run_test.IOPerfTest.format_diff_for_console(
            [x['io'][0], y['io'][0]]))
        return 0

    # NOTE(review): from here on cfg must be bound; if wally is invoked
    # with no subcommand, subparser_name is None and cfg.settings below
    # raises NameError - confirm whether argparse config prevents that.
    if not opts.no_report:
        report_stages.append(run_test.console_report_stage)
        if opts.load_report:
            report_stages.append(run_test.test_load_report_stage)
        report_stages.append(run_test.html_report_stage)

    # CLI -l/--log-level wins over the config file setting
    if opts.log_level is not None:
        str_level = opts.log_level
    else:
        str_level = cfg.settings.get('log_level', 'INFO')

    setup_loggers(getattr(logging, str_level), cfg.log_file)
    logger.info("All info would be stored into " + cfg.results_dir)

    for stage in stages:
        ok = False
        # NOTE(review): presumably log_stage suppresses exceptions raised
        # by stage(); otherwise 'ok' is always True here and the break
        # below is dead code - confirm against .stage.log_stage.
        with log_stage(stage):
            stage(cfg, ctx)
            ok = True
        if not ok:
            break

    # sys.exc_info() returns (type, value, traceback); 'exc' is thus the
    # exception *type* (or None), which is all the checks below rely on.
    exc, cls, tb = sys.exc_info()
    # run teardown callbacks registered by stages, in reverse order
    for stage in ctx.clear_calls_stack[::-1]:
        with log_stage(stage):
            stage(cfg, ctx)

    logger.debug("Start utils.cleanup")
    for clean_func, args, kwargs in utils.iter_clean_func():
        with log_stage(clean_func):
            clean_func(*args, **kwargs)

    # reports only make sense when all stages completed without error
    if exc is None:
        for report_stage in report_stages:
            with log_stage(report_stage):
                report_stage(cfg, ctx)

    logger.info("All info stored into " + cfg.results_dir)

    if exc is None:
        logger.info("Tests finished successfully")
        return 0
    else:
        logger.error("Tests are failed. See detailed error above")
        return 1