import time
import json
import copy
import logging
from concurrent.futures import Future
from typing import List, Dict, Tuple, Optional, Union, cast

from cephlib.wally_storage import WallyDB
from cephlib.node import NodeInfo, IRPCNode, get_hw_info, get_sw_info
from cephlib.ssh import parse_ssh_uri
from cephlib.node_impl import setup_rpc, connect

from . import utils
from .config import ConfigBlock
from .stage import Stage, StepOrder
from .sensors import collect_sensors_data
from .suits.all_suits import all_suits
from .test_run_class import TestRun
from .result_classes import SuiteConfig


logger = logging.getLogger("wally")


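# NOTE: each class below is a pipeline stage.  As used in this module, a Stage
# declares a `priority` (a StepOrder value fixing its place in the run), an
# optional `config_block` naming the config section that enables it, a `run()`
# hook and an optional `cleanup()` hook, both of which receive the shared
# TestRun context.  (Summary inferred from the stages in this file; the
# authoritative contract lives in .stage.Stage.)
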
class ConnectStage(Stage):
    """Connect to nodes stage"""

    priority = StepOrder.CONNECT

    def run(self, ctx: TestRun) -> None:
        with ctx.get_pool() as pool:
            logger.info("Connecting to %s nodes", len(ctx.nodes_info))

            def connect_ext(node_info: NodeInfo) -> Tuple[bool, Union[IRPCNode, NodeInfo]]:
                try:
                    ssh_node = connect(node_info, conn_timeout=ctx.config.connect_timeout)

                    return True, setup_rpc(ssh_node,
                                           ctx.rpc_code,
                                           ctx.default_rpc_plugins,
                                           log_level=ctx.config.rpc_log_level)
                except Exception as exc:
                    logger.exception("Failed to connect to %s: %s", node_info, exc)
                    return False, node_info

            failed_testnodes = []  # type: List[NodeInfo]
            failed_nodes = []  # type: List[NodeInfo]
            ctx.nodes = []

            for ok, node in pool.map(connect_ext, ctx.nodes_info.values()):
                if not ok:
                    node = cast(NodeInfo, node)
                    if 'testnode' in node.roles:
                        failed_testnodes.append(node)
                    else:
                        failed_nodes.append(node)
                else:
                    ctx.nodes.append(cast(IRPCNode, node))

            if failed_nodes:
                msg = "Can't connect to node(s) {} - excluding them from the run"
                logger.warning(msg.format(", ".join(map(str, failed_nodes))))

            if failed_testnodes:
                msg = "Can't start RPC on testnode(s) " + ", ".join(map(str, failed_testnodes))
                logger.error(msg)
                raise utils.StopTestError(msg)

            if not failed_nodes:
                logger.info("All nodes connected successfully")

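            # Rough clock-skew check: ask every node for its current time and
            # compare the answer with the local window [t_start, t_end] taken
            # around the RPC round-trip.  Any value outside that window means
            # the node's clock is off by at least that much.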
            def get_time(node):
                return node.conn.sys.time()

            t_start = time.time()
            tms = pool.map(get_time, ctx.nodes)
            t_end = time.time()

            for node, val in zip(ctx.nodes, tms):
                delta = 0
                if val > t_end:
                    delta = val - t_end
                elif t_start > val:
                    delta = t_start - val

                if delta > ctx.config.max_time_diff_ms:
                    msg = ("Too large time shift {}ms on node {}. Stopping the test. Fix the time on " +
                           "the cluster nodes and restart the test, or increase the max_time_diff_ms " +
                           "(currently {}ms) setting in the config").format(delta, str(node),
                                                                            ctx.config.max_time_diff_ms)
                    logger.error(msg)
                    raise utils.StopTestError(msg)
                if delta > 0:
                    logger.warning("Node %s has a time shift of at least %s ms", node, delta)

    def cleanup(self, ctx: TestRun) -> None:
        if ctx.config.get("download_rpc_logs", False):
            logger.info("Killing all outstanding processes")
            for node in ctx.nodes:
                node.conn.cli.killall()

            logger.info("Downloading RPC server logs")
            for node in ctx.nodes:
                node.conn.cli.killall()
                if node.rpc_log_file is not None:
                    nid = node.node_id
                    path = WallyDB.rpc_logs.format(node_id=nid)
                    node.conn.server.flush_logs()
                    log = node.get_file_content(node.rpc_log_file)
                    if path in ctx.storage:
                        ctx.storage.append_raw(log, path)
                    else:
                        ctx.storage.put_raw(log, path)
                    logger.debug("RPC log from node {} stored into storage::{}".format(nid, path))

        logger.info("Disconnecting")
        with ctx.get_pool() as pool:
            list(pool.map(lambda node: node.disconnect(stop=True), ctx.nodes))


class CollectInfoStage(Stage):
    """Collect node info"""

    priority = StepOrder.START_SENSORS - 2
    config_block = 'collect_info'

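    # For every connected node, gather hardware and software descriptions via
    # cephlib's get_hw_info/get_sw_info and store them under the per-node
    # WallyDB.hw_info / WallyDB.sw_info paths, skipping nodes whose info is
    # already present in the storage.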
    def run(self, ctx: TestRun) -> None:
        if not ctx.config.collect_info:
            return

        futures = {}  # type: Dict[Tuple[str, str], Future]

        with ctx.get_pool() as pool:
            # a node can't serve the next RPC request until the previous one has finished
            for node in ctx.nodes:
                nid = node.node_id
                hw_info_path = WallyDB.hw_info.format(node_id=nid)
                if hw_info_path not in ctx.storage:
                    futures[(hw_info_path, nid)] = pool.submit(get_hw_info, node)

            for (path, nid), future in futures.items():
                try:
                    ctx.storage.put(future.result(), path)
                except Exception:
                    logger.exception("Failed to collect hardware info from %s", nid)
                    raise utils.StopTestError()

            futures.clear()
            for node in ctx.nodes:
                nid = node.node_id
                sw_info_path = WallyDB.sw_info.format(node_id=nid)
                if sw_info_path not in ctx.storage:
                    futures[(sw_info_path, nid)] = pool.submit(get_sw_info, node)

            for (path, nid), future in futures.items():
                try:
                    ctx.storage.put(future.result(), path)
                except Exception:
                    logger.exception("Failed to collect software info from %s", nid)
                    raise utils.StopTestError()


class ExplicitNodesStage(Stage):
    """Add explicitly configured nodes"""

    priority = StepOrder.DISCOVER
    config_block = 'nodes'

    def run(self, ctx: TestRun) -> None:
        if WallyDB.all_nodes in ctx.storage:
            logger.info("Skip filling of explicit nodes, as all_nodes is already in storage")
            return

        for url, roles in ctx.config.get('nodes', {}).raw().items():
            ctx.merge_node(parse_ssh_uri(url), set(role.strip() for role in roles.split(",")))
            logger.debug("Add node %s with roles %s", url, roles)
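
    # Illustrative 'nodes' config fragment (YAML layout assumed; the exact SSH
    # URI syntax is whatever cephlib.ssh.parse_ssh_uri accepts).  Each key is
    # an SSH URI and its value is a comma-separated list of roles:
    #
    #   nodes:
    #       ssh://root@192.168.0.10: testnode
    #       ssh://root@192.168.0.11: ceph-osd,ceph-mon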


class SleepStage(Stage):
    """Sleep for the configured number of seconds"""

    priority = StepOrder.TEST
    config_block = 'sleep'

    def run(self, ctx: TestRun) -> None:
        logger.debug("Will sleep for %r seconds", ctx.config.sleep)
        stime = time.time()
        time.sleep(ctx.config.sleep)
        ctx.storage.put([int(stime), int(time.time())], 'idle')


class PrepareNodes(Stage):
    priority = StepOrder.START_SENSORS - 1

    def __init__(self):
        Stage.__init__(self)
        self.nodeepscrub_updated = False
        self.noscrub_updated = False

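    # The stage is driven by the optional 'ceph_settings' config value, a
    # whitespace-separated list of flags; only 'noscrub' and 'nodeepscrub'
    # are recognized.  Illustrative fragment (YAML layout assumed):
    #
    #   ceph_settings: "noscrub nodeepscrub"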
    def run(self, ctx: TestRun) -> None:
        ceph_sett = ctx.config.get('ceph_settings', "").split()
        if ceph_sett:
            for node in ctx.nodes:
                if "ceph-mon" in node.info.roles or "ceph-osd" in node.info.roles:
                    state = json.loads(node.run("ceph health --format json"))["summary"]["summary"]
                    if 'noscrub' in ceph_sett:
                        if 'noscrub' in state:
                            logger.debug("noscrub already set on cluster")
                        else:
                            logger.info("Applying noscrub setting to ceph cluster")
                            node.run("ceph osd set noscrub")
                            self.noscrub_updated = True

                    if 'nodeepscrub' in ceph_sett:
                        if 'nodeepscrub' in state:
                            logger.debug("nodeepscrub already set on cluster")
                        else:
                            logger.info("Applying nodeepscrub setting to ceph cluster")
                            node.run("ceph osd set nodeepscrub")
                            self.nodeepscrub_updated = True
                    break

    def cleanup(self, ctx: TestRun) -> None:
        if self.nodeepscrub_updated or self.noscrub_updated:
            for node in ctx.nodes:
                if "ceph-mon" in node.info.roles or "ceph-osd" in node.info.roles:
                    if self.noscrub_updated:
                        logger.info("Reverting noscrub setting for ceph cluster")
                        node.run("ceph osd unset noscrub")
                        self.noscrub_updated = False

                    if self.nodeepscrub_updated:
                        logger.info("Reverting nodeepscrub setting for ceph cluster")
                        node.run("ceph osd unset nodeepscrub")
                        self.nodeepscrub_updated = False


class RunTestsStage(Stage):

    priority = StepOrder.TEST
    config_block = 'tests'

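    # Illustrative 'tests' config fragment (YAML layout assumed).  Each list
    # entry must contain exactly one suite, keyed by a name from all_suits
    # (for wally that is typically 'fio'); 'node_limit' optionally caps how
    # many test nodes are used:
    #
    #   tests:
    #       - fio:
    #             node_limit: 2
    #             # ... suite-specific parameters ...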
    def run(self, ctx: TestRun) -> None:
        if ctx.config.no_tests:
            logger.info("Skipping tests, as the 'no_tests' config setting is True")
            return

        for suite_idx, test_suite in enumerate(ctx.config.get('tests', [])):
            test_nodes = [node for node in ctx.nodes if 'testnode' in node.info.roles]

            if not test_nodes:
                logger.error("No test nodes found")
                raise utils.StopTestError()

            if len(test_suite) != 1:
                logger.error("Test suite %s contains more than one test. Put each test in a separate group",
                             suite_idx)
                raise utils.StopTestError()

            name, params = list(test_suite.items())[0]
            vm_count = params.get('node_limit', None)  # type: Optional[int]

            # select test nodes
            if vm_count is None:
                curr_test_nodes = test_nodes
            else:
                curr_test_nodes = test_nodes[:vm_count]

            if not curr_test_nodes:
                logger.error("No nodes found for test, skipping it.")
                continue

            if name not in all_suits:
                logger.error("Test suite %r not found. Only suites [%s] are available", name, ", ".join(all_suits))
                raise utils.StopTestError()

            test_cls = all_suits[name]
            remote_dir = ctx.config.default_test_local_folder.format(name=name, uuid=ctx.config.run_uuid)
            suite = SuiteConfig(test_cls.name,
                                params=params,
                                run_uuid=ctx.config.run_uuid,
                                nodes=curr_test_nodes,
                                remote_dir=remote_dir,
                                idx=suite_idx,
                                keep_raw_files=ctx.config.keep_raw_files)

            test_cls(storage=ctx.rstorage,
                     suite=suite,
                     on_idle=lambda: collect_sensors_data(ctx, False)).run()

    @classmethod
    def validate_config(cls, cfg: ConfigBlock) -> None:
        pass


class SaveNodesStage(Stage):
    """Save the node list to storage"""

    priority = StepOrder.UPDATE_NODES_INFO + 1

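    # Node params are dumped once as JSON under WallyDB.nodes_params
    # (presumably to keep the serialized NodeInfo entries small); each stored
    # NodeInfo keeps only a reference to that entry in its 'params' field.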
    def run(self, ctx: TestRun) -> None:
        infos = list(ctx.nodes_info.values())
        params = {node.node_id: node.params for node in infos}
        ninfos = [copy.copy(node) for node in infos]
        for node in ninfos:
            node.params = {"in file": WallyDB.nodes_params}
        ctx.storage.put_list(ninfos, WallyDB.all_nodes)
        ctx.storage.put_raw(json.dumps(params).encode('utf8'), WallyDB.nodes_params)


class LoadStoredNodesStage(Stage):
    priority = StepOrder.DISCOVER

    def run(self, ctx: TestRun) -> None:
        if WallyDB.all_nodes in ctx.storage:
            if ctx.nodes_info:
                logger.error("Internal error: some nodes are already present in " +
                             "nodes_info before the LoadStoredNodesStage stage")
                raise utils.StopTestError()

            ctx.nodes_info = {node.node_id: node for node in ctx.rstorage.load_nodes()}
            logger.info("%s nodes loaded from storage", len(ctx.nodes_info))