import os
import time
import logging
import functools
import contextlib
import collections
from typing import List, Dict, Iterable, Any, Iterator, Mapping, Callable, Tuple, Optional, Union, cast
from concurrent.futures import ThreadPoolExecutor, Future

from .node_interfaces import NodeInfo, IRPCNode
from .test_run_class import TestRun
from .discover import discover
from . import pretty_yaml, utils, report, ssh_utils, start_vms, hw_info
from .config import ConfigBlock, Config

from .suits.mysql import MysqlTest
from .suits.itest import TestConfig
from .suits.io.fio import IOPerfTest
from .suits.postgres import PgBenchTest
from .suits.omgbench import OmgTest


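# Maps a test name from the 'tests' config section to the suite class that runs it
# (see run_tests below). A hypothetical config fragment that would dispatch through
# this mapping (keys other than 'node_limit' are illustrative only):
#
#   tests:
#     - io:
#         node_limit: 2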
TOOL_TYPE_MAPPER = {
    "io": IOPerfTest,
    "pgbench": PgBenchTest,
    "mysql": MysqlTest,
    "omg": OmgTest,
}


logger = logging.getLogger("wally")


def connect_all(nodes_info: List[NodeInfo],
                pool: ThreadPoolExecutor,
                conn_timeout: int = 30,
                rpc_conn_callback: ssh_utils.RPCBeforeConnCallback = None) -> List[IRPCNode]:
    """Connect to all nodes, log errors"""

    logger.info("Connecting to %s nodes", len(nodes_info))

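    # connect_ext never raises: it returns an (ok, result) pair so the results of
    # pool.map below can be partitioned without one bad node aborting the batch.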
    def connect_ext(node_info: NodeInfo) -> Tuple[bool, Union[IRPCNode, NodeInfo]]:
        try:
            ssh_node = ssh_utils.connect(node_info.ssh_conn_url, conn_timeout=conn_timeout)
            return True, ssh_utils.setup_rpc(ssh_node, rpc_conn_callback=rpc_conn_callback)
        except Exception as exc:
            logger.error("During connect to {}: {!s}".format(node_info, exc))
            return False, node_info

    failed_testnodes = []  # type: List[NodeInfo]
    failed_nodes = []  # type: List[NodeInfo]
    ready = []  # type: List[IRPCNode]

    for ok, node in pool.map(connect_ext, nodes_info):
        if not ok:
            node = cast(NodeInfo, node)
            if 'testnode' in node.roles:
                failed_testnodes.append(node)
            else:
                failed_nodes.append(node)
        else:
            ready.append(cast(IRPCNode, node))

    if failed_nodes:
        msg = "Node(s) {} will be excluded - can't connect"
        logger.warning(msg.format(",".join(map(str, failed_nodes))))

    if failed_testnodes:
        msg = "Can't connect to testnode(s) " + \
              ",".join(map(str, failed_testnodes))
        logger.error(msg)
        raise utils.StopTestError(msg)

    if not failed_nodes:
        logger.info("All nodes connected successfully")

    return ready


def collect_info_stage(ctx: TestRun, nodes: Iterable[IRPCNode]) -> None:
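    """Collect hardware and software info from each node and cache it in the run storage."""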
    futures = {}  # type: Dict[str, Future]

    with ctx.get_pool() as pool:
        for node in nodes:
            hw_info_path = "hw_info/{}".format(node.node_id())
            if hw_info_path not in ctx.storage:
                futures[hw_info_path] = pool.submit(hw_info.get_hw_info, node)

            sw_info_path = "sw_info/{}".format(node.node_id())
            if sw_info_path not in ctx.storage:
                futures[sw_info_path] = pool.submit(hw_info.get_sw_info, node)

        for path, future in futures.items():
            ctx.storage[path] = future.result()


@contextlib.contextmanager
def suspend_vm_nodes_ctx(unused_nodes: List[IRPCNode]) -> Iterator[List[int]]:
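    """Pause unused VM nodes for the duration of the with-block.

    Yields the OpenStack ids of the nodes that were actually paused; they are
    unpaused again on exit.
    """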

    pausable_nodes_ids = [cast(int, node.info.os_vm_id)
                          for node in unused_nodes
                          if node.info.os_vm_id is not None]

    non_pausable = len(unused_nodes) - len(pausable_nodes_ids)

    if non_pausable:
        logger.warning("Can't pause {} nodes".format(non_pausable))

    if pausable_nodes_ids:
        logger.debug("Trying to pause {} unused nodes".format(len(pausable_nodes_ids)))
        start_vms.pause(pausable_nodes_ids)

    try:
        yield pausable_nodes_ids
    finally:
        if pausable_nodes_ids:
            logger.debug("Unpausing {} nodes".format(len(pausable_nodes_ids)))
            start_vms.unpause(pausable_nodes_ids)


def run_tests(ctx: TestRun, test_block: ConfigBlock, nodes: List[IRPCNode]) -> None:
    """Run tests from a test block"""

    test_nodes = [node for node in nodes if 'testnode' in node.info.roles]

    if not test_nodes:
        logger.error("No test nodes found")
        return

    for name, params in test_block.items():
        vm_count = params.get('node_limit', None)  # type: Optional[int]

        # select test nodes
        if vm_count is None:
            curr_test_nodes = test_nodes
            unused_nodes = []
        else:
            curr_test_nodes = test_nodes[:vm_count]
            unused_nodes = test_nodes[vm_count:]

        if not curr_test_nodes:
            logger.error("No nodes found for test, skipping it.")
            continue

        # results_path = generate_result_dir_name(cfg.results_storage, name, params)
        # utils.mkdirs_if_unxists(results_path)

        # suspend all unused virtual nodes
        if ctx.config.get('suspend_unused_vms', True):
            suspend_ctx = suspend_vm_nodes_ctx(unused_nodes)
        else:
            suspend_ctx = utils.empty_ctx()

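        # Nodes selected for this test may still be paused from a previous
        # iteration, so unpause them explicitly before starting.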
        with suspend_ctx:
            resumable_nodes_ids = [cast(int, node.info.os_vm_id)
                                   for node in curr_test_nodes
                                   if node.info.os_vm_id is not None]

            if resumable_nodes_ids:
                logger.debug("Check and unpause {} nodes".format(len(resumable_nodes_ids)))
                start_vms.unpause(resumable_nodes_ids)

            test_cls = TOOL_TYPE_MAPPER[name]

            remote_dir = ctx.config.default_test_local_folder.format(name=name, uuid=ctx.config.run_uuid)

            test_cfg = TestConfig(test_cls.__name__,
                                  params=params,
                                  run_uuid=ctx.config.run_uuid,
                                  nodes=curr_test_nodes,  # only the nodes selected above; the rest are suspended
                                  storage=ctx.storage,
                                  remote_dir=remote_dir)

            test_cls(test_cfg).run()


def connect_stage(ctx: TestRun) -> None:
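    """Connect to all known nodes and schedule disconnect_stage for cleanup."""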
    ctx.clear_calls_stack.append(disconnect_stage)

    with ctx.get_pool() as pool:
        ctx.nodes = connect_all(ctx.nodes_info, pool, rpc_conn_callback=ctx.before_conn_callback)


def discover_stage(ctx: TestRun) -> None:
    """Discover clusters and nodes stage"""

    discover_info = ctx.config.get('discover')
    if discover_info:
        discover_objs = [i.strip() for i in discover_info.strip().split(",")]

        nodes_info = discover.discover(ctx, discover_objs,
                                       ctx.config.clouds,
                                       ctx.storage,
                                       not ctx.config.dont_discover_nodes)

        ctx.nodes_info.extend(nodes_info)

    for url, roles in ctx.config.get('explicit_nodes', {}).items():
        ctx.nodes_info.append(NodeInfo(url, set(roles.split(","))))


def save_nodes_stage(ctx: TestRun) -> None:
    """Save the node list into the run storage"""
    ctx.storage['nodes'] = ctx.nodes_info


def reuse_vms_stage(ctx: TestRun) -> None:
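    """Find already running VMs matching the configured name patterns and add them as test nodes."""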
    vms_patterns = ctx.config.get('clouds/openstack/vms', [])
    private_key_path = get_vm_keypair(ctx.config)['keypair_file_private']

    for creds in vms_patterns:
        user_name, vm_name_pattern = creds.split("@", 1)
        msg = "Vm like {} lookup failed".format(vm_name_pattern)

        with utils.LogError(msg):
            msg = "Looking for vm with name like {0}".format(vm_name_pattern)
            logger.debug(msg)

            if not start_vms.is_connected():
                os_creds = get_OS_credentials(ctx)
            else:
                os_creds = None

            conn = start_vms.nova_connect(os_creds)
            for ip, vm_id in start_vms.find_vms(conn, vm_name_pattern):
                conn_url = "ssh://{user}@{ip}::{key}".format(user=user_name,
                                                             ip=ip,
                                                             key=private_key_path)
                node_info = NodeInfo(conn_url, ['testnode'])
                node_info.os_vm_id = vm_id
                ctx.nodes_info.append(node_info)


def get_OS_credentials(ctx: TestRun) -> start_vms.OSCreds:
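    """Resolve OpenStack credentials.

    Checked in order: an OPENRC file, the shell environment, explicit OS_*
    values from the config, and finally the Fuel-provided credentials.
    """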
    creds = None
    os_creds = None
    force_insecure = False
    cfg = ctx.config

    if 'openstack' in cfg.clouds:
        os_cfg = cfg.clouds['openstack']
        if 'OPENRC' in os_cfg:
            logger.info("Using OS credentials from " + os_cfg['OPENRC'])
            creds_tuple = utils.get_creds_openrc(os_cfg['OPENRC'])
            os_creds = start_vms.OSCreds(*creds_tuple)
        elif 'ENV' in os_cfg:
            logger.info("Using OS credentials from shell environment")
            os_creds = start_vms.ostack_get_creds()
        elif 'OS_TENANT_NAME' in os_cfg:
            logger.info("Using predefined credentials")
            os_creds = start_vms.OSCreds(os_cfg['OS_USERNAME'].strip(),
                                         os_cfg['OS_PASSWORD'].strip(),
                                         os_cfg['OS_TENANT_NAME'].strip(),
                                         os_cfg['OS_AUTH_URL'].strip(),
                                         os_cfg.get('OS_INSECURE', False))

        elif 'OS_INSECURE' in os_cfg:
            force_insecure = os_cfg.get('OS_INSECURE', False)

    if os_creds is None and 'fuel' in cfg.clouds and \
            'openstack_env' in cfg.clouds['fuel'] and \
            ctx.fuel_openstack_creds is not None:
        logger.info("Using fuel creds")
        creds = start_vms.OSCreds(**ctx.fuel_openstack_creds)
    elif os_creds is None:
        logger.error("Can't find OS credentials")
        raise utils.StopTestError("Can't find OS credentials", None)

    if creds is None:
        creds = os_creds

    if force_insecure and not creds.insecure:
        creds = start_vms.OSCreds(creds.name,
                                  creds.passwd,
                                  creds.tenant,
                                  creds.auth_url,
                                  True)

    logger.debug(("OS_CREDS: user={0.name} tenant={0.tenant} " +
                  "auth_url={0.auth_url} insecure={0.insecure}").format(creds))

    return creds


def get_vm_keypair(cfg: Config) -> Dict[str, str]:
    # Callers index the result by 'keypair_file_private' and merge it into the VM
    # params dict, so a mapping (not a bare tuple) is returned here.
    key_name = cfg.vm_configs['keypair_name']
    private_path = os.path.join(cfg.settings_dir, key_name + "_private.pem")
    public_path = os.path.join(cfg.settings_dir, key_name + "_public.pub")
    return {'keypair_file_private': private_path,
            'keypair_file_public': public_path}


@contextlib.contextmanager
def create_vms_ctx(ctx: TestRun, vm_config: ConfigBlock, already_has_count: int = 0) -> Iterator[List[NodeInfo]]:
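    """Spawn the requested OpenStack VMs unless enough are already running.

    Yields NodeInfo objects for the new VMs and removes them on exit unless
    config.keep_vm is set.
    """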
    if vm_config['count'].startswith('='):
        count = int(vm_config['count'][1:])
        if count <= already_has_count:
            logger.debug("No new VMs are needed")
            yield []
            return

    if not start_vms.is_connected():
        os_creds = get_OS_credentials(ctx)
    else:
        os_creds = None

    nova = start_vms.nova_connect(os_creds)

    os_nodes_ids = ctx.storage.get('spawned_vm_ids', [])  # type: List[int]
    new_nodes = []  # type: List[NodeInfo]

    if not os_nodes_ids:
        params = ctx.config.vm_configs[vm_config['cfg_name']].copy()
        params.update(vm_config)
        params.update(get_vm_keypair(ctx.config))
        params['group_name'] = ctx.config.run_uuid
        params['keypair_name'] = ctx.config.vm_configs['keypair_name']

        if not vm_config.get('skip_preparation', False):
            logger.info("Preparing openstack")
            start_vms.prepare_os(nova, params, os_creds)
    else:
        # TODO(koder): reconnect to old VM's
        raise NotImplementedError("Reconnect to old vms is not implemented")

    already_has_count += len(os_nodes_ids)
    old_nodes = ctx.nodes[:]

    for node_info, node_id in start_vms.launch_vms(nova, params, already_has_count):
        node_info.roles.append('testnode')
        os_nodes_ids.append(node_id)
        new_nodes.append(node_info)
        ctx.storage['spawned_vm_ids'] = os_nodes_ids

    yield new_nodes

    # keep nodes in case of error for future test restart
    if not ctx.config.keep_vm:
        shut_down_vms_stage(ctx, os_nodes_ids)
        ctx.storage['spawned_vm_ids'] = []


def run_tests_stage(ctx: TestRun) -> None:
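    """Run all test groups from the 'tests' config section.

    For a 'start_test_nodes' group, spawn and connect the requested VMs first;
    otherwise run the group as a plain test block.
    """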
    for group in ctx.config.get('tests', []):
        gitems = list(group.items())
        if len(gitems) != 1:
            msg = "Items in tests section should have len == 1"
            logger.error(msg)
            raise utils.StopTestError(msg)

        key, config = gitems[0]

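        # A group either requests new test VMs ('start_test_nodes', carrying its own
        # nested 'tests' list) or is itself a single test block.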
        if 'start_test_nodes' == key:
            if 'openstack' not in config:
                msg = "No openstack block in config - can't spawn vm's"
                logger.error(msg)
                raise utils.StopTestError(msg)

            num_test_nodes = len([node for node in ctx.nodes if 'testnode' in node.info.roles])
            vm_ctx = create_vms_ctx(ctx, config['openstack'], num_test_nodes)
            tests = config.get('tests', [])
        else:
            vm_ctx = utils.empty_ctx([])
            tests = [group]

        with vm_ctx as new_nodes:  # type: List[NodeInfo]
            new_rpc_nodes = []  # type: List[IRPCNode]
            if new_nodes:
                with ctx.get_pool() as pool:
                    new_rpc_nodes = connect_all(new_nodes, pool, rpc_conn_callback=ctx.before_conn_callback)

            test_nodes = ctx.nodes + new_rpc_nodes

            if ctx.config.get('sensors'):
                sensor_ctx = sensor_monitoring(ctx.config.get('sensors'), test_nodes)
            else:
                sensor_ctx = utils.empty_ctx([])

            if not ctx.config.no_tests:
                for test_group in tests:
                    with sensor_ctx:
                        run_tests(ctx, test_group, test_nodes)

            for node in new_rpc_nodes:
                node.disconnect()


def shut_down_vms_stage(ctx: TestRun, nodes_ids: List[int]) -> None:
    if nodes_ids:
        logger.info("Removing nodes")
        start_vms.clear_nodes(nodes_ids)
        logger.info("Nodes have been removed")


def clear_enviroment(ctx: TestRun) -> None:
    shut_down_vms_stage(ctx, ctx.storage.get('spawned_vm_ids', []))
    ctx.storage['spawned_vm_ids'] = []


def disconnect_stage(ctx: TestRun) -> None:
    # TODO(koder): what next line was for?
    # ssh_utils.close_all_sessions()

    for node in ctx.nodes:
        node.disconnect()


def console_report_stage(ctx: TestRun) -> None:
    # TODO(koder): load data from storage
    raise NotImplementedError("...")
    # first_report = True
    # text_rep_fname = ctx.config.text_report_file
    #
    # with open(text_rep_fname, "w") as fd:
    #     for tp, data in ctx.results.items():
    #         if 'io' == tp and data is not None:
    #             rep_lst = []
    #             for result in data:
    #                 rep_lst.append(
    #                     IOPerfTest.format_for_console(list(result)))
    #             rep = "\n\n".join(rep_lst)
    #         elif tp in ['mysql', 'pgbench'] and data is not None:
    #             rep = MysqlTest.format_for_console(data)
    #         elif tp == 'omg':
    #             rep = OmgTest.format_for_console(data)
    #         else:
    #             logger.warning("Can't generate text report for " + tp)
    #             continue
    #
    #         fd.write(rep)
    #         fd.write("\n")
    #
    #         if first_report:
    #             logger.info("Text report were stored in " + text_rep_fname)
    #             first_report = False
    #
    #         print("\n" + rep + "\n")


# def test_load_report_stage(cfg: Config, ctx: TestRun) -> None:
#     load_rep_fname = cfg.load_report_file
#     found = False
#     for idx, (tp, data) in enumerate(ctx.results.items()):
#         if 'io' == tp and data is not None:
#             if found:
#                 logger.error("Making reports for more than one " +
#                              "io block isn't supported! All " +
#                              "report, except first are skipped")
#                 continue
#             found = True
#             report.make_load_report(idx, cfg['results'], load_rep_fname)
#
#

def html_report_stage(ctx: TestRun) -> None:
    # TODO(koder): load data from storage
    raise NotImplementedError("...")
    # html_rep_fname = cfg.html_report_file
    # found = False
    # for tp, data in ctx.results.items():
    #     if 'io' == tp and data is not None:
    #         if found or len(data) > 1:
    #             logger.error("Making reports for more than one " +
    #                          "io block isn't supported! All " +
    #                          "report, except first are skipped")
    #             continue
    #         found = True
    #         report.make_io_report(list(data[0]),
    #                               cfg.get('comment', ''),
    #                               html_rep_fname,
    #                               lab_info=ctx.nodes)

#
# def load_data_from_path(test_res_dir: str) -> Mapping[str, List[Any]]:
#     files = get_test_files(test_res_dir)
#     raw_res = yaml_load(open(files['raw_results']).read())
#     res = collections.defaultdict(list)
#
#     for tp, test_lists in raw_res:
#         for tests in test_lists:
#             for suite_name, suite_data in tests.items():
#                 result_folder = suite_data[0]
#                 res[tp].append(TOOL_TYPE_MAPPER[tp].load(suite_name, result_folder))
#
#     return res
#
#
# def load_data_from_path_stage(var_dir: str, _, ctx: TestRun) -> None:
#     for tp, vals in load_data_from_path(var_dir).items():
#         ctx.results.setdefault(tp, []).extend(vals)
#
#
# def load_data_from(var_dir: str) -> Callable[[TestRun], None]:
#     return functools.partial(load_data_from_path_stage, var_dir)