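"""Entry point for io-perf-tool.

Discovers and connects to test nodes, optionally boots OpenStack VMs for
testing, deploys monitoring sensors, runs the configured io/pgbench test
suites and stores console/HTML reports of the results.
"""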
import os
import sys
import time
import Queue
import pprint
import logging
import argparse
import threading
import collections

import yaml
from concurrent.futures import ThreadPoolExecutor

import utils
import report
import ssh_utils
import start_vms
import pretty_yaml
from nodes import discover
from nodes.node import Node
from config import cfg_dict, load_config
from tests.itest import IOPerfTest, PgBenchTest
from formatters import format_results_for_console
from sensors.api import start_monitoring, deploy_and_start_sensors


logger = logging.getLogger("io-perf-tool")


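# Console log colorization: color_me() builds an ANSI escape wrapper for
# one color, ColoredFormatter applies it to the record's level name.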
def color_me(color):
    RESET_SEQ = "\033[0m"
    COLOR_SEQ = "\033[1;%dm"

    color_seq = COLOR_SEQ % (30 + color)

    def closure(msg):
        return color_seq + msg + RESET_SEQ
    return closure


class ColoredFormatter(logging.Formatter):
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)

    colors = {
        'WARNING': color_me(YELLOW),
        'DEBUG': color_me(BLUE),
        'CRITICAL': color_me(YELLOW),
        'ERROR': color_me(RED)
    }

    def __init__(self, msg, datefmt=None, use_color=True):
        # accept datefmt explicitly so the "%H:%M:%S" passed from
        # setup_logger() is used as the date format, not silently
        # consumed as use_color
        logging.Formatter.__init__(self, msg, datefmt)
        self.use_color = use_color

    def format(self, record):
        levelname = record.levelname

        prn_name = ' ' * (6 - len(levelname)) + levelname
        if levelname in self.colors:
            record.levelname = self.colors[levelname](prn_name)
        else:
            record.levelname = prn_name

        return logging.Formatter.format(self, record)


def setup_logger(logger, level=logging.DEBUG, log_fname=None):
    logger.setLevel(logging.DEBUG)
    sh = logging.StreamHandler()
    sh.setLevel(level)

    log_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
    colored_formatter = ColoredFormatter(log_format, "%H:%M:%S")

    formatter = logging.Formatter(log_format, "%H:%M:%S")
    sh.setFormatter(colored_formatter)
    logger.addHandler(sh)

    if log_fname is not None:
        fh = logging.FileHandler(log_fname)
        fh.setFormatter(formatter)
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)

    logger_api = logging.getLogger("io-perf-tool.fuel_api")
    logger_api.addHandler(sh)
    logger_api.setLevel(logging.WARNING)


def format_result(res, formatter):
    data = "\n{0}\n".format("=" * 80)
    data += pprint.pformat(res) + "\n"
    data += "{0}\n".format("=" * 80)
    templ = "{0}\n\n====> {1}\n\n{2}\n\n"
    return templ.format(data, formatter(res), "=" * 80)


class Context(object):
    def __init__(self):
        self.build_meta = {}
        self.nodes = []
        self.clear_calls_stack = []
        self.openstack_nodes_ids = []


def connect_one(node):
    try:
        ssh_pref = "ssh://"
        if node.conn_url.startswith(ssh_pref):
            url = node.conn_url[len(ssh_pref):]
            node.connection = ssh_utils.connect(url)
        else:
            raise ValueError("Unknown url type {0}".format(node.conn_url))
    except Exception:
        logger.exception("During connect to {0}".format(node))
        raise


def connect_all(nodes):
    logger.info("Connecting to nodes")
    with ThreadPoolExecutor(32) as pool:
        list(pool.map(connect_one, nodes))
    logger.info("All nodes connected successfully")


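# Background consumer for sensor samples: writes everything received from
# the sensors queue to a file. None is the stop sentinel; an empty list is
# pushed back so remove_sensors_stage() gets a value from its final get().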
def save_sensors_data(q, fd):
    logger.info("Start receiving sensors data")
    while True:
        val = q.get()
        if val is None:
            q.put([])
            break
        fd.write("\n" + str(time.time()) + " : ")
        fd.write(repr(val))
    logger.info("Sensors thread exits")


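# Per-node test runner, executed in a dedicated thread; any exception is
# pushed to the shared result queue for run_tests() to report.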
def test_thread(test, node, barrier, res_q):
    try:
        logger.debug("Run preparation for {0}".format(node.conn_url))
        test.pre_run(node.connection)
        logger.debug("Run test for {0}".format(node.conn_url))
        test.run(node.connection, barrier)
    except Exception as exc:
        logger.exception("In test {0} for node {1}".format(test, node))
        res_q.put(exc)


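# Run each test block from the config on every node with the 'testnode'
# role: one thread per node, synchronized via a barrier; results (or
# exceptions) are gathered from res_q and merged per test name.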
def run_tests(test_block, nodes):
    tool_type_mapper = {
        "io": IOPerfTest,
        "pgbench": PgBenchTest,
    }

    test_nodes = [node for node in nodes
                  if 'testnode' in node.roles]

    res_q = Queue.Queue()

    for name, params in test_block.items():
        logger.info("Starting {0} tests".format(name))

        threads = []
        barrier = utils.Barrier(len(test_nodes))
        for node in test_nodes:
            msg = "Starting {0} test on {1} node"
            logger.debug(msg.format(name, node.conn_url))
            test = tool_type_mapper[name](params, res_q.put)
            th = threading.Thread(None, test_thread, None,
                                  (test, node, barrier, res_q))
            threads.append(th)
            th.daemon = True
            th.start()

        def gather_results(res_q, results):
            while not res_q.empty():
                val = res_q.get()

                if isinstance(val, Exception):
                    msg = "Exception during test execution: {0}"
                    raise ValueError(msg.format(val.message))

                results.append(val)

        results = []

        while True:
            for th in threads:
                th.join(1)
            gather_results(res_q, results)

            if all(not th.is_alive() for th in threads):
                break

        gather_results(res_q, results)
        yield name, test.merge_results(results)


def log_nodes_statistic(_, ctx):
    nodes = ctx.nodes
    logger.info("Found {0} nodes total".format(len(nodes)))
    per_role = collections.defaultdict(lambda: 0)
    for node in nodes:
        for role in node.roles:
            per_role[role] += 1

    for role, count in sorted(per_role.items()):
        logger.debug("Found {0} nodes with role {1}".format(count, role))


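# Pipeline stages. Each stage is called as stage(cfg_dict, ctx); stages
# that need cleanup push the matching teardown stage onto
# ctx.clear_calls_stack, which main() unwinds in reverse order.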
def connect_stage(cfg, ctx):
    ctx.clear_calls_stack.append(disconnect_stage)
    connect_all(ctx.nodes)


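# Node discovery is driven by two optional config keys: 'discover', a
# comma-separated list of cloud names looked up in cfg['clouds'], and
# 'explicit_nodes', a mapping of connection url -> comma-separated roles.
# A hypothetical snippet (values are illustrative only):
#
#   discover: openstack
#   explicit_nodes:
#       "ssh://user@192.168.0.5": testnode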
def discover_stage(cfg, ctx):
    if cfg.get('discover') is not None:
        discover_objs = [i.strip() for i in cfg['discover'].strip().split(",")]
        ctx.nodes.extend(discover.discover(ctx, discover_objs, cfg['clouds']))

    for url, roles in cfg.get('explicit_nodes', {}).items():
        ctx.nodes.append(Node(url, roles.split(",")))


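# The 'sensors' section supplies 'receiver_uri' and a 'roles_mapping' of
# role -> comma-separated sensor names; collected samples are written to
# the file named by the top-level 'sensor_storage' key. A made-up example
# (sensor names are illustrative only):
#
#   sensors:
#       receiver_uri: udp://192.168.0.4:5699
#       roles_mapping:
#           testnode: io, net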
def deploy_sensors_stage(cfg_dict, ctx):
    if 'sensors' not in cfg_dict:
        return

    ctx.clear_calls_stack.append(remove_sensors_stage)
    cfg = cfg_dict.get('sensors')
    sens_cfg = []

    for role, sensors_str in cfg["roles_mapping"].items():
        sensors = [sens.strip() for sens in sensors_str.split(",")]

        collect_cfg = dict((sensor, {}) for sensor in sensors)

        for node in ctx.nodes:
            if role in node.roles:
                sens_cfg.append((node.connection, collect_cfg))

    ctx.sensor_cm = start_monitoring(cfg["receiver_uri"], None,
                                     connected_config=sens_cfg)

    ctx.sensors_control_queue = ctx.sensor_cm.__enter__()

    fd = open(cfg_dict['sensor_storage'], "w")
    th = threading.Thread(None, save_sensors_data, None,
                          (ctx.sensors_control_queue, fd))
    th.daemon = True
    th.start()
    ctx.sensor_listen_thread = th


def remove_sensors_stage(cfg, ctx):
    ctx.sensor_cm.__exit__(None, None, None)
    ctx.sensors_control_queue.put(None)
    ctx.sensor_listen_thread.join()
    ctx.sensor_data = ctx.sensors_control_queue.get()


def get_os_credentials(cfg, ctx, creds_type):
    creds = None

    if creds_type == 'clouds':
        if 'openstack' in cfg['clouds']:
            os_cfg = cfg['clouds']['openstack']

            tenant = os_cfg['OS_TENANT_NAME'].strip()
            user = os_cfg['OS_USERNAME'].strip()
            passwd = os_cfg['OS_PASSWORD'].strip()
            auth_url = os_cfg['OS_AUTH_URL'].strip()

        elif 'fuel' in cfg['clouds'] and \
                'openstack_env' in cfg['clouds']['fuel']:
            creds = ctx.fuel_openstack_creds

    elif creds_type == 'ENV':
        user, passwd, tenant, auth_url = start_vms.ostack_get_creds()
    elif os.path.isfile(creds_type):
        # NOTE: parsing credentials from a file is not implemented yet,
        # this branch currently behaves the same as 'ENV'
        user, passwd, tenant, auth_url = start_vms.ostack_get_creds()
    else:
        msg = "Creds type {0!r} isn't supported".format(creds_type)
        raise ValueError(msg)

    if creds is None:
        creds = {'name': user,
                 'passwd': passwd,
                 'tenant': tenant,
                 'auth_url': auth_url}

    return creds


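# cfg['tests'] is a list of single-key blocks. A 'start_test_nodes' block
# boots OpenStack VMs ('vm_params', 'creds') and runs its nested 'tests'
# on them; any other key containing 'tests' is handed straight to
# run_tests(). A sketch of the expected shape (values elided):
#
#   tests:
#       - start_test_nodes:
#           creds: ENV
#           vm_params: ...
#           tests:
#               - io: ...
#       - io_tests:
#           io: ...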
def run_tests_stage(cfg, ctx):
    ctx.results = []

    if 'tests' not in cfg:
        return

    for group in cfg['tests']:

        assert len(group.items()) == 1
        key, config = group.items()[0]

        if 'start_test_nodes' == key:
            params = config['vm_params']
            os_nodes_ids = []

            os_creds_type = config['creds']
            os_creds = get_os_credentials(cfg, ctx, os_creds_type)

            start_vms.nova_connect(**os_creds)

            # logger.info("Preparing openstack")
            # start_vms.prepare_os(**os_creds)

            new_nodes = []
            try:
                for new_node, node_id in start_vms.launch_vms(params):
                    new_node.roles.append('testnode')
                    ctx.nodes.append(new_node)
                    os_nodes_ids.append(node_id)
                    new_nodes.append(new_node)

                store_nodes_in_log(cfg, os_nodes_ids)
                ctx.openstack_nodes_ids = os_nodes_ids

                connect_all(new_nodes)

                # deploy sensors on the new nodes
                # TODO: unify this with deploy_sensors_stage
                if 'sensors' in cfg:
                    sens_cfg = []
                    sensors_str = cfg["sensors"]["roles_mapping"]['testnode']
                    sensors = [sens.strip() for sens in sensors_str.split(",")]

                    collect_cfg = dict((sensor, {}) for sensor in sensors)
                    for node in new_nodes:
                        sens_cfg.append((node.connection, collect_cfg))

                    uri = cfg["sensors"]["receiver_uri"]
                    deploy_and_start_sensors(uri, None,
                                             connected_config=sens_cfg)

                for test_group in config.get('tests', []):
                    ctx.results.extend(run_tests(test_group, ctx.nodes))

            finally:
                shut_down_vms_stage(cfg, ctx)

        elif 'tests' in key:
            ctx.results.extend(run_tests(config, ctx.nodes))


def shut_down_vms_stage(cfg, ctx):
    vm_ids_fname = cfg_dict['vm_ids_fname']
    if ctx.openstack_nodes_ids is None:
        nodes_ids = open(vm_ids_fname).read().split()
    else:
        nodes_ids = ctx.openstack_nodes_ids

    if len(nodes_ids) != 0:
        logger.info("Removing nodes")
        start_vms.clear_nodes(nodes_ids)
        logger.info("Nodes have been removed")

    if os.path.exists(vm_ids_fname):
        os.remove(vm_ids_fname)


def store_nodes_in_log(cfg, nodes_ids):
    with open(cfg['vm_ids_fname'], 'w') as fd:
        fd.write("\n".join(nodes_ids))


def clear_enviroment(cfg, ctx):
    if os.path.exists(cfg_dict['vm_ids_fname']):
        shut_down_vms_stage(cfg, ctx)


def disconnect_stage(cfg, ctx):
    ssh_utils.close_all_sessions()

    for node in ctx.nodes:
        if node.connection is not None:
            node.connection.close()


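# Convert results into plain YAML-serializable types: tuples/lists and
# dicts are converted recursively, unicode becomes str (Python 2).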
def yamable(data):
    if isinstance(data, (tuple, list)):
        return map(yamable, data)

    if isinstance(data, unicode):
        return str(data)

    if isinstance(data, dict):
        res = {}
        for k, v in data.items():
            res[yamable(k)] = yamable(v)
        return res

    return data


def store_raw_results_stage(cfg, ctx):

    raw_results = os.path.join(cfg_dict['var_dir'], 'raw_results.yaml')

    if os.path.exists(raw_results):
        cont = yaml.load(open(raw_results).read())
    else:
        cont = []

    cont.extend(yamable(ctx.results))
    raw_data = pretty_yaml.dumps(cont)

    with open(raw_results, "w") as fd:
        fd.write(raw_data)


def console_report_stage(cfg, ctx):
    for tp, data in ctx.results:
        if 'io' == tp:
            print format_results_for_console(data)


def report_stage(cfg, ctx):

    html_rep_fname = cfg['html_report_file']
    report.make_io_report(ctx.results, html_rep_fname)

    logger.info("Html report was stored in " + html_rep_fname)

    text_rep_fname = cfg_dict['text_report_file']
    with open(text_rep_fname, "w") as fd:
        for tp, data in ctx.results:
            if 'io' == tp:
                fd.write(format_results_for_console(data))
                fd.write("\n")
                fd.flush()

    logger.info("Text report was stored in " + text_rep_fname)


def complete_log_nodes_statistic(cfg, ctx):
    nodes = ctx.nodes
    for node in nodes:
        logger.debug(str(node))


def load_data_from(var_dir):
    def load_data_from_file(cfg, ctx):
        raw_results = os.path.join(var_dir, 'raw_results.yaml')
        ctx.results = yaml.load(open(raw_results).read())
    return load_data_from_file


def parse_args(argv):
    parser = argparse.ArgumentParser(
        description="Run disk io performance test")

    parser.add_argument("-l", dest='extra_logs',
                        action='store_true', default=False,
                        help="print some extra log info")

    parser.add_argument("-b", '--build_description',
                        type=str, default="Build info")
    parser.add_argument("-i", '--build_id', type=str, default="id")
    parser.add_argument("-t", '--build_type', type=str, default="GA")
    parser.add_argument("-u", '--username', type=str, default="admin")
    parser.add_argument("-p", '--post-process-only', default=None)
    parser.add_argument("-o", '--output-dest', nargs="*")
    parser.add_argument("config_file", nargs="?", default="config.yaml")

    return parser.parse_args(argv[1:])


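# main() either replays previously stored results (-p/--post-process-only)
# or runs the full pipeline; cleanup stages registered on
# ctx.clear_calls_stack always run afterwards and the original exception,
# if any, is re-raised.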
def main(argv):
    opts = parse_args(argv)

    if opts.post_process_only is not None:
        stages = [
            load_data_from(opts.post_process_only),
            console_report_stage,
            report_stage
        ]
    else:
        stages = [
            discover_stage,
            log_nodes_statistic,
            connect_stage,
            deploy_sensors_stage,
            run_tests_stage,
            store_raw_results_stage,
            console_report_stage,
            report_stage
        ]

    load_config(opts.config_file, opts.post_process_only)

    level = logging.DEBUG if opts.extra_logs else logging.WARNING
    setup_logger(logger, level, cfg_dict['log_file'])

    logger.info("All info will be stored in {0}".format(
        cfg_dict['var_dir']))

    ctx = Context()
    ctx.build_meta['build_id'] = opts.build_id
    ctx.build_meta['build_descrption'] = opts.build_description
    ctx.build_meta['build_type'] = opts.build_type
    ctx.build_meta['username'] = opts.username

    try:
        for stage in stages:
            logger.info("Start {0.__name__} stage".format(stage))
            stage(cfg_dict, ctx)
    finally:
        exc_type, exc_val, tb = sys.exc_info()
        for stage in ctx.clear_calls_stack[::-1]:
            try:
                logger.info("Start {0.__name__} stage".format(stage))
                stage(cfg_dict, ctx)
            except Exception:
                logger.exception("During {0.__name__} stage".format(stage))

        if exc_type is not None:
            raise exc_type, exc_val, tb

    logger.info("All info was stored in {0}".format(cfg_dict['var_dir']))
    return 0


if __name__ == '__main__':
    exit(main(sys.argv))