blob: fde217feca7d0e1e4153d68bf057a298273a5aad [file] [log] [blame]
Alexe0c5b9e2019-04-23 18:51:23 -05001import json
Alex Savatieiev9b2f6512019-02-20 18:05:00 -06002import os
Alex3ebc5632019-04-18 16:47:18 -05003from copy import deepcopy
Alex9a4ad212020-10-01 18:04:25 -05004from multiprocessing.dummy import Pool
Alex Savatieiev9b2f6512019-02-20 18:05:00 -06005
Alex9a4ad212020-10-01 18:04:25 -05006from cfg_checker.clients import get_salt_remote, get_kube_remote
7from cfg_checker.common.const import all_salt_roles_map, all_kube_roles_map
Alexe9908f72020-05-19 16:04:53 -05008from cfg_checker.common.const import NODE_UP, NODE_DOWN, NODE_SKIP
Alex9a4ad212020-10-01 18:04:25 -05009from cfg_checker.common.const import ubuntu_versions, nova_openstack_versions
Alex7c9494e2019-04-22 10:40:59 -050010from cfg_checker.common import logger, logger_cli
Alexe0c5b9e2019-04-23 18:51:23 -050011from cfg_checker.common import utils
Alex9a4ad212020-10-01 18:04:25 -050012from cfg_checker.common.file_utils import create_temp_file_with_content
13from cfg_checker.common.exception import SaltException, KubeException
14from cfg_checker.common.ssh_utils import PortForward, SshShell
15from cfg_checker.common.settings import pkg_dir, ENV_TYPE_KUBE, ENV_TYPE_SALT
16from cfg_checker.helpers.console_utils import Progress
17
Alex Savatieiev9b2f6512019-02-20 18:05:00 -060018
# Template record for a single environment node.
# Every discovered node gets a deepcopy of this dict; Kube nodes later
# pop the salt-specific 'grains'/'pillars' keys (see KubeNodes).
node_tmpl = {
    'role': '',
    'node_group': '',
    'status': NODE_DOWN,
    'pillars': {},
    'grains': {},
    'raw': {}
}
27
28
Alex9a4ad212020-10-01 18:04:25 -050029def _prepare_skipped_nodes(_names, skip_list, skip_list_file):
30 _skipped_minions = []
31 # skip list file
32 if skip_list_file:
33 _valid, _invalid = utils.get_nodes_list(skip_list_file)
Alex9a4ad212020-10-01 18:04:25 -050034 _skipped_minions.extend(_valid)
Alex359e5752021-08-16 17:28:30 -050035 if len(_invalid) < 1:
36 logger_cli.info(
37 "\n# WARNING: Detected invalid entries "
38 "in nodes skip list:\n{}\n".format(
39 "\n".join(_invalid)
40 )
41 )
Alexe8643642021-08-23 14:08:46 -050042
Alex9a4ad212020-10-01 18:04:25 -050043 # process wildcard, create node list out of mask
44 if skip_list:
45 _list = []
46 _invalid = []
47 for _item in skip_list:
48 if '*' in _item:
49 _str = _item[:_item.index('*')]
50 _nodes = [_m for _m in _names if _m.startswith(_str)]
51 if not _nodes:
52 logger_cli.warn(
53 "# WARNING: No nodes found for {}".format(_item)
54 )
55 _list.extend(_nodes)
56 else:
57 if _item in _names:
58 _list += _item
59 else:
60 logger_cli.warn(
61 "# WARNING: No node found for {}".format(_item)
62 )
63 # removing duplicates
64 _list = list(set(_list))
65 _skipped_minions.extend(_list)
66
67 return _skipped_minions
68
69
class Nodes(object):
    """Base container for environment nodes.

    Subclasses implement ``gather_node_info`` and are expected to populate
    ``self.nodes``, ``self.skip_list``, ``self.not_responded`` and the
    release attributes read by ``get_info``.
    """

    def __init__(self, config):
        # node map is built lazily on first get_nodes() call
        self.nodes = None
        self.env_config = config

    def skip_node(self, node):
        """Add a known node to the skip list.

        For example, when it fails to comply with the rules.

        :return: True when the node was added, False otherwise
        """
        # unknown node -> nothing to do
        if node not in self.nodes.keys():
            return False
        # already skipped -> do not add twice
        if node in self.skip_list:
            return False
        self.skip_list.append(node)
        return True

    def get_nodes(self, skip_list=None, skip_list_file=None):
        """Return the node map, gathering node info on first use."""
        if not self.nodes:
            _skip = skip_list
            # fall back to the configured skip list when none was given
            if not _skip and self.env_config.skip_nodes:
                _skip = self.env_config.skip_nodes
            self.gather_node_info(_skip, skip_list_file)
        return self.nodes

    def get_info(self):
        """Return the release info collected for this environment."""
        return {
            'mcp_release': self.mcp_release,
            'openstack_release': self.openstack_release
        }

    def is_node_available(self, node, log=True):
        """Check that a node is neither skipped nor unresponsive."""
        if node in self.skip_list:
            if log:
                logger_cli.info("-> node '{}' not active".format(node))
            return False
        if node in self.not_responded:
            if log:
                logger_cli.info("-> node '{}' not responded".format(node))
            return False
        return True
116
117
class SaltNodes(Nodes):
    """Environment nodes discovered and driven through the Salt API."""

    def __init__(self, config):
        super(SaltNodes, self).__init__(config)
        logger_cli.info("# Gathering environment information")
        # simple salt rest client; created lazily in gather_node_info
        self.salt = None
        self.env_type = ENV_TYPE_SALT

    def gather_node_info(self, skip_list, skip_list_file):
        """Collect minion names, statuses and base pillar data via Salt.

        Populates ``self.nodes``, ``self.skip_list``, ``self.domain``,
        ``self.mcp_release`` and ``self.openstack_release``.

        :param skip_list: node names/masks to skip, or None
        :param skip_list_file: path to a skip-list file, or None
        :raises SaltException: when no cfg (master) node can be found
        :return: None, data is stored on the instance
        """
        # Keys for all nodes
        # this is not working in scope of 2016.8.3, will overide with list
        logger_cli.debug("... collecting node names existing in the cloud")
        if not self.salt:
            self.salt = get_salt_remote(self.env_config)

        try:
            _keys = self.salt.list_keys()
            _str = []
            for _k, _v in _keys.items():
                _str.append("{}: {}".format(_k, len(_v)))
            logger_cli.info("-> keys collected: {}".format(", ".join(_str)))

            self.node_keys = {
                'minions': _keys['minions']
            }
        except Exception:
            # best effort: key listing is optional, minion list below
            # has two more fallbacks
            _keys = None
            self.node_keys = None

        # List of minions with grains
        _minions = self.salt.list_minions()
        if _minions:
            logger_cli.info(
                "-> api reported {} active minions".format(len(_minions))
            )
        elif not self.node_keys:
            # this is the last resort
            _minions = self.env_config.load_nodes_list()
            logger_cli.info(
                "-> {} nodes loaded from list file".format(len(_minions))
            )
        else:
            _minions = self.node_keys['minions']

        # Skip nodes if needed
        _skipped_minions = \
            _prepare_skipped_nodes(_minions, skip_list, skip_list_file)

        # in case API not listed minions, we need all that answer ping
        _active = self.salt.get_active_nodes()
        logger_cli.info("-> nodes responded: {}".format(len(_active)))
        # iterate through all accepted nodes and create a dict for it
        self.nodes = {}
        self.skip_list = []
        _domains = set()
        for _name in _minions:
            _nc = utils.get_node_code(_name)
            _rmap = all_salt_roles_map
            _role = _rmap[_nc] if _nc in _rmap else 'unknown'
            if _name in _skipped_minions:
                _status = NODE_SKIP
                self.skip_list.append(_name)
            else:
                _status = NODE_UP if _name in _active else NODE_DOWN
                if _status == NODE_DOWN:
                    self.skip_list.append(_name)
                    logger_cli.info(
                        "-> '{}' is down, "
                        "added to skip list".format(
                            _name
                        )
                    )
            self.nodes[_name] = deepcopy(node_tmpl)
            # NOTE(review): assumes minion names are FQDNs containing a
            # dot; an undotted name would raise IndexError here — confirm.
            self.nodes[_name]['shortname'] = _name.split(".", 1)[0]
            _domains.add(_name.split(".", 1)[1])
            self.nodes[_name]['node_group'] = _nc
            self.nodes[_name]['role'] = _role
            self.nodes[_name]['status'] = _status
        _domains = list(_domains)
        if len(_domains) > 1:
            logger_cli.warning(
                "Multiple domains detected: {}".format(",".join(_domains))
            )
            # TODO: Use domain with biggest node count by default
            # or force it via config option
        else:
            # NOTE(review): raises IndexError when _domains is empty
            # (no minions found); also self.domain stays unset in the
            # multi-domain branch above — confirm intended behavior.
            self.domain = _domains[0]
        logger_cli.info("-> {} nodes inactive".format(len(self.skip_list)))
        logger_cli.info("-> {} nodes collected".format(len(self.nodes)))

        # form an all nodes compound string to use in salt
        self.active_nodes_compound = self.salt.compound_string_from_list(
            filter(
                lambda nd: self.nodes[nd]['status'] == NODE_UP,
                self.nodes
            )
        )
        # get master node fqdn
        # _filtered = filter(
        #     lambda nd: self.nodes[nd]['role'] == const.all_roles_map['cfg'],
        #     self.nodes
        # )
        _role = all_salt_roles_map['cfg']
        _filtered = [n for n, v in self.nodes.items() if v['role'] == _role]
        if len(_filtered) < 1:
            raise SaltException(
                "No master node detected! Check/Update node role map."
            )
        else:
            self.salt.master_node = _filtered[0]

        # OpenStack versions
        self.mcp_release = self.salt.pillar_get(
            self.salt.master_node,
            "_param:apt_mk_version"
        )[self.salt.master_node]
        self.openstack_release = self.salt.pillar_get(
            self.salt.master_node,
            "_param:openstack_version"
        )[self.salt.master_node]
        # Preload codenames
        # do additional queries to get linux codename and arch for each node
        self.get_specific_pillar_for_nodes("_param:linux_system_codename")
        self.get_specific_pillar_for_nodes("_param:linux_system_architecture")
        for _name in self.nodes.keys():
            _n = self.nodes[_name]
            if _name not in self.skip_list:
                _p = _n['pillars']['_param']
                _n['linux_codename'] = _p['linux_system_codename']
                _n['linux_arch'] = _p['linux_system_architecture']

    def get_cmd_for_nodes(self, cmd, target_key, target_dict=None, nodes=None):
        """Run `cmd.run` for the given command and parse results into place
        or into the dict structure provided.

        :param cmd: shell command line executed via Salt `cmd.run`
        :param target_key: key to store each node's output under
        :param target_dict: optional destination map (defaults to self.nodes)
        :param nodes: optional Salt target; defaults to active nodes compound
        :return: no return value, data published internally
        """
        logger_cli.debug(
            "... collecting results for '{}'".format(cmd)
        )
        if target_dict:
            _nodes = target_dict
        else:
            _nodes = self.nodes
        _result = self.execute_cmd_on_active_nodes(cmd, nodes=nodes)
        for node, data in _nodes.items():

            if node in self.skip_list:
                logger_cli.debug(
                    "... '{}' skipped while collecting '{}'".format(
                        node,
                        cmd
                    )
                )
                continue
            # Prepare target key
            if target_key not in data:
                data[target_key] = None
            # Save data
            if data['status'] in [NODE_DOWN, NODE_SKIP]:
                data[target_key] = None
            elif node not in _result:
                continue
            elif not _result[node]:
                # empty/false result means the node did not answer in time
                logger_cli.debug(
                    "... '{}' not responded after '{}'".format(
                        node,
                        self.env_config.salt_timeout
                    )
                )
                data[target_key] = None
            else:
                data[target_key] = _result[node]

    def get_specific_pillar_for_nodes(self, pillar_path):
        """Fetch the pillar at the given 'a:b:c' path for all active nodes.

        The value is stored under ``self.nodes[<name>]['pillars']`` with the
        nested dict structure mirroring the pillar path.

        :param pillar_path: colon-separated pillar path, e.g. "_param:foo"
        :return: no return value, data published internally
        """
        logger_cli.debug(
            "... collecting node pillars for '{}'".format(pillar_path)
        )
        _result = self.salt.pillar_get(self.active_nodes_compound, pillar_path)
        self.not_responded = []
        for node, data in self.nodes.items():
            if node in self.skip_list:
                logger_cli.debug(
                    "... '{}' skipped while collecting '{}'".format(
                        node,
                        pillar_path
                    )
                )
                continue
            _pillar_keys = pillar_path.split(':')
            _data = data['pillars']
            # pre-create nested dict
            for idx in range(0, len(_pillar_keys)-1):
                _key = _pillar_keys[idx]
                if _key not in _data:
                    _data[_key] = {}
                _data = _data[_key]
            if data['status'] in [NODE_DOWN, NODE_SKIP]:
                _data[_pillar_keys[-1]] = None
            elif not _result[node]:
                logger_cli.debug(
                    "... '{}' not responded after '{}'".format(
                        node,
                        self.env_config.salt_timeout
                    )
                )
                _data[_pillar_keys[-1]] = None
                self.not_responded.append(node)
            else:
                _data[_pillar_keys[-1]] = _result[node]

    def prepare_json_on_node(self, node, _dict, filename):
        """Serialize ``_dict`` to JSON and deliver it to ``node`` via the
        master's file cache.

        :return: path of the file on the target node
        """
        if node in self.skip_list:
            # NOTE(review): this branch only logs; there is no early
            # return, so the upload below still runs for skipped nodes —
            # confirm whether a `return` is intended here.
            logger_cli.debug(
                "... '{}' skipped while preparing json file of '{}'".format(
                    node,
                    filename
                )
            )

        # this function assumes that all folders are created
        _dumps = json.dumps(_dict, indent=2).splitlines()
        _storage_path = os.path.join(
            self.env_config.salt_file_root, self.env_config.salt_scripts_folder
        )
        logger_cli.debug(
            "... uploading data as '{}' "
            "to master's file cache folder: '{}'".format(
                filename,
                _storage_path
            )
        )
        _cache_path = os.path.join(_storage_path, filename)
        _source_path = os.path.join(
            'salt://',
            self.env_config.salt_scripts_folder,
            filename
        )
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            filename
        )

        logger_cli.debug("... creating file in cache '{}'".format(_cache_path))
        self.salt.f_touch_master(_cache_path)
        self.salt.f_append_master(_cache_path, _dumps)
        logger.debug("... syncing file to '{}'".format(node))
        self.salt.get_file(
            node,
            _source_path,
            _target_path,
            tgt_type="compound"
        )
        return _target_path

    def prepare_script_on_active_nodes(self, script_filename):
        """Upload a packaged script to the master cache and sync it to all
        active nodes under /root/<scripts_folder>/.

        :param script_filename: file name inside the package 'scripts' dir
        :return: path of the script on the target nodes
        """
        # Prepare script
        _p = os.path.join(pkg_dir, 'scripts', script_filename)
        with open(_p, 'rt') as fd:
            _script = fd.read().splitlines()
        _storage_path = os.path.join(
            self.env_config.salt_file_root, self.env_config.salt_scripts_folder
        )
        logger_cli.debug(
            "... uploading script {} "
            "to master's file cache folder: '{}'".format(
                script_filename,
                _storage_path
            )
        )
        self.salt.mkdir(self.salt.master_node, _storage_path)
        # Form cache, source and target path
        _cache_path = os.path.join(_storage_path, script_filename)
        _source_path = os.path.join(
            'salt://',
            self.env_config.salt_scripts_folder,
            script_filename
        )
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            script_filename
        )

        logger_cli.debug("... creating file in cache '{}'".format(_cache_path))
        self.salt.f_touch_master(_cache_path)
        self.salt.f_append_master(_cache_path, _script)
        # command salt to copy file to minions
        logger_cli.debug(
            "... creating script target folder '{}'".format(
                _cache_path
            )
        )
        self.salt.mkdir(
            self.active_nodes_compound,
            os.path.join(
                '/root',
                self.env_config.salt_scripts_folder
            ),
            tgt_type="compound"
        )
        logger.debug("... syncing file to nodes")
        self.salt.get_file(
            self.active_nodes_compound,
            _source_path,
            _target_path,
            tgt_type="compound"
        )
        # return path on nodes, just in case
        return _target_path

    def execute_script_on_node(self, node, script_filename, args=[]):
        """Run a previously prepared script on a single node via `cmd.run`.

        NOTE(review): mutable default ``args=[]`` — not mutated here, but
        ``args=None`` would be the safer idiom.

        :return: dict of {node: output}; empty output marks no response
        """
        # Prepare path
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            script_filename
        )

        # execute script
        logger.debug("... running script on '{}'".format(node))
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        self.not_responded = []
        _r = self.salt.cmd(
            node,
            'cmd.run',
            param='python {} {}'.format(_target_path, _script_arguments),
            expr_form="compound"
        )

        # all false returns means that there is no response
        self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
        return _r

    def execute_script_on_active_nodes(self, script_filename, args=[]):
        """Run a previously prepared script on all active nodes.

        :return: dict of {node: output}; empty output marks no response
        """
        # Prepare path
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            script_filename
        )

        # execute script
        logger_cli.debug("... running script")
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        self.not_responded = []
        _r = self.salt.cmd(
            self.active_nodes_compound,
            'cmd.run',
            param='python {} {}'.format(_target_path, _script_arguments),
            expr_form="compound"
        )

        # all false returns means that there is no response
        self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
        return _r

    def execute_cmd_on_active_nodes(self, cmd, nodes=None):
        """Run an arbitrary shell command via `cmd.run` on active nodes.

        :param cmd: shell command line to run
        :param nodes: optional Salt target; defaults to active compound
        :return: dict of {node: output}; empty output marks no response
        """
        # execute cmd
        self.not_responded = []
        _r = self.salt.cmd(
            nodes if nodes else self.active_nodes_compound,
            'cmd.run',
            param=cmd,
            expr_form="compound"
        )

        # all false returns means that there is no response
        self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
        return _r
495
Alex9a4ad212020-10-01 18:04:25 -0500496
class KubeNodes(Nodes):
    """Environment nodes discovered through the Kubernetes API and driven
    over SSH (directly or via a port-forward through a jump host)."""

    def __init__(self, config):
        super(KubeNodes, self).__init__(config)
        logger_cli.info("# Gathering environment information")
        # simple kube rest client wrapper
        self.kube = get_kube_remote(self.env_config)
        self.env_type = ENV_TYPE_KUBE

    def gather_node_info(self, skip_list, skip_list_file):
        """Collect node names, labels/roles and statuses from Kubernetes.

        Populates ``self.nodes``, ``self.skip_list``, ``self._active``,
        ``self.mcp_release`` and ``self.openstack_release``.

        :raises KubeException: when no k8s-master node can be found
        :return: None, data is stored on the instance
        """
        # Gather nodes info and query pod lists for each node
        logger_cli.debug("... collecting node names existing in the cloud")

        # Gather node names and info
        _nodes = self.kube.get_node_info()
        _node_names = list(_nodes.keys())
        # Skip nodes if needed
        _skipped_nodes = \
            _prepare_skipped_nodes(_node_names, skip_list, skip_list_file)

        # Count how many nodes active
        self._active = [n for n, v in _nodes.items()
                        if v['conditions']['ready']['status']]

        # iterate through all accepted nodes and create a dict for it
        self.nodes = {}
        self.skip_list = []
        # _domains = set()
        for _name in _node_names:
            if _name in _skipped_nodes:
                _status = NODE_SKIP
                self.skip_list.append(_name)
            else:
                _status = NODE_UP if _name in self._active else NODE_DOWN
                if _status == NODE_DOWN:
                    self.skip_list.append(_name)
                    logger_cli.info(
                        "-> '{}' shows 'Ready' as 'False', "
                        "added to skip list".format(
                            _name
                        )
                    )
            # split node labels into role labels (mapped) and plain labels
            _roles = {}
            _labels = {}
            for _label, _value in _nodes[_name]['labels'].items():
                if _label in all_kube_roles_map:
                    _roles[all_kube_roles_map[_label]] = _value
                else:
                    _labels[_label] = _value

            self.nodes[_name] = deepcopy(node_tmpl)
            # salt-specific keys are not used for kube nodes
            self.nodes[_name].pop("grains")
            self.nodes[_name].pop("pillars")

            # hostname
            self.nodes[_name]['shortname'] = \
                _nodes[_name]['addresses']['hostname']['address']
            self.nodes[_name]['internalip'] = \
                _nodes[_name]['addresses']['internalip']['address']
            # _domains.add(_name.split(".", 1)[1])
            self.nodes[_name]['node_group'] = None
            self.nodes[_name]['labels'] = _labels
            self.nodes[_name]['roles'] = _roles
            self.nodes[_name]['status'] = _status
            # Backward compatibility
            _info = _nodes[_name]['status']['node_info']
            self.nodes[_name]['linux_image'] = _info['os_image']
            self.nodes[_name]['linux_arch'] = _info['architecture']

            # derive ubuntu codename from the os_image string
            # NOTE(review): assumes os_image is exactly three
            # space-separated tokens (e.g. "Ubuntu 18.04.5 LTS") — confirm.
            _codename = "unknown"
            _n, _v, _c = _info['os_image'].split()
            if _n.lower() == 'ubuntu':
                _v, _, _ = _v.rpartition('.') if '.' in _v else (_v, "", "")
                if _v in ubuntu_versions:
                    _codename = ubuntu_versions[_v].split()[0].lower()
            self.nodes[_name]['linux_codename'] = _codename

            # Consider per-data type transfer
            self.nodes[_name]["raw"] = _nodes[_name]
        # TODO: Investigate how to handle domains in Kube, probably - skip
        # _domains = list(_domains)
        # if len(_domains) > 1:
        #     logger_cli.warning(
        #         "Multiple domains detected: {}".format(",".join(_domains))
        #     )
        # else:
        #     self.domain = _domains[0]
        logger_cli.info(
            "-> {} nodes collected: {} - active, {} - not active".format(
                len(self.nodes),
                len(self._active),
                len(self.skip_list)
            )
        )

        _role = "k8s-master"
        _filtered = [n for n, v in self.nodes.items() if _role in v['roles']]
        if len(_filtered) < 1:
            raise KubeException(
                "No k8s-master nodes detected! Check/Update node role map."
            )
        else:
            _r = [n for n, v in self.nodes.items()
                  if v['status'] != NODE_UP and _role in v['roles']]
            if len(_r) > 0:
                logger_cli.warn(
                    "Master nodes are reporting 'NotReady:\n{}".format(
                        "\n".join(_r)
                    )
                )
            self.kube.master_node = _filtered[0]

        # get specific data upfront
        # OpenStack versions
        self.mcp_release = ""
        # Quick and Dirty way to detect OS release
        try:
            _nova_version = self.kube.exec_on_target_pod(
                "nova-manage --version",
                "nova-api-osapi",
                "openstack"
            )
            # map nova major version to the openstack release name
            _nmajor = _nova_version.partition('.')[0]
            self.openstack_release = nova_openstack_versions[_nmajor]
        except KubeException as e:
            logger_cli.warn("Openstack not detected: {}".format(e.message))
            self.openstack_release = nova_openstack_versions["00"]

        return

    @staticmethod
    def _get_ssh_shell(_h, _u, _k, _p, _q, _pipe):
        """Create an SshShell for host/user/keypath/port and connect it."""
        _ssh = SshShell(
            _h,
            user=_u,
            keypath=_k,
            port=_p,
            silent=_q,
            piped=_pipe
        )
        return _ssh.connect()

    @staticmethod
    def _do_ssh_cmd(_cmd, _h, _u, _k, _p, _q, _pipe):
        """Run a single command over a short-lived ssh shell and return
        its output."""
        with SshShell(
            _h,
            user=_u,
            keypath=_k,
            port=_p,
            silent=_q,
            piped=_pipe
        ) as ssh:
            _r = ssh.do(_cmd)
            logger_cli.debug("'{}'".format(_r))
            return _r

    def node_shell(
            self,
            node,
            silent=True,
            piped=True,
            use_sudo=True,
            fport=None
    ):
        """Open an ssh shell to a node.

        Connects directly when the kube client is local or ssh_direct is
        configured; otherwise sets up a port-forward via the ssh host.

        :return: tuple (port_forward_or_None, shell)
        """
        _u = self.env_config.kube_node_user
        _k = self.env_config.kube_node_keypath
        _h = self.nodes[node]['internalip']
        _p = 22
        if self.kube.is_local or self.kube.config.ssh_direct:
            # direct connection: no forwarder object is returned
            return None, self._get_ssh_shell(_h, _u, _k, _p, silent, piped)
        else:
            _fh = "localhost"
            _p = 10022 if not fport else fport
            _pfwd = PortForward(
                self.env_config.ssh_host,
                _h,
                user=_u,
                keypath=self.env_config.ssh_key,
                loc_port=_p
            )
            _pfwd.connect()
            _ssh = self._get_ssh_shell(_fh, _u, _k, _p, silent, piped)
            return _pfwd, _ssh

    def execute_script_on_node(self, node, script_filename, args=[]):
        """Run a previously delivered script on a single node.

        :return: dict of {node: result} or {} when the node did not respond
        """
        # Prepare path
        _target_path = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder,
            script_filename
        )

        # execute script
        logger_cli.debug("... running script on '{}'".format(node))
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        self.not_responded = []
        # get result
        # NOTE(review): node_shell()'s second positional parameter is
        # `silent`, not a command; the command string built here is passed
        # as `silent` and node_shell never executes it — verify intent.
        _nr = self.node_shell(
            node,
            "python {} {}".format(
                _target_path,
                _script_arguments
            )
        )

        if not _nr:
            self.not_responded.append(node)
            return {}
        else:
            return {node: _nr}

    def execute_cmd_on_active_nodes(self, cmd, nodes=None):
        """Run a shell command on every active node, sequentially.

        :return: dict of {node: result} for nodes that responded
        """
        # execute script
        logger_cli.debug("...running '{}' on active nodes".format(cmd))
        # handle results for each node
        self.not_responded = []
        _r = {}
        # TODO: Use threading and pool
        for node in self._active:
            # NOTE(review): same concern as execute_script_on_node —
            # `cmd` is passed into node_shell()'s `silent` parameter and
            # is not executed by node_shell — verify intent.
            _nr = self.node_shell(
                node,
                cmd
            )

            if not _nr:
                self.not_responded.append(node)
            else:
                _r[node] = _nr

        return _r

    def _exec_script(self, params):
        """
        Threadsafe method to get shell to node,
        check/copy script and get results
        [
            node_name,
            src_path,
            tgt_path,
            conf,
            args
        ]
        """
        _name = params[0]
        _src = params[1]
        _tgt = params[2]
        _conf = params[3]
        _args = params[4]
        _port = params[5]
        _log_name = "["+_name+"]:"
        # shell snippet: prints True when the given path exists and is
        # non-empty, False otherwise
        _check = "echo $(if [[ -s '{}' ]]; then echo True; " \
                 "else echo False; fi)"
        _fwd_sh, _sh = self.node_shell(
            _name,
            use_sudo=False,
            fport=_port
        )
        # check python3
        _python = _sh.do("which python3")
        _python = utils.to_bool(
            _sh.do(_check.format(_python))
        )
        if not _python:
            _sh.do("apt install python3", sudo=True)
        # check if script already there
        _folder = os.path.join(
            self.env_config.kube_node_homepath,
            _conf.kube_scripts_folder
        )
        # check if folder exists
        _folder_exists = utils.to_bool(
            _sh.do(_check.format(_folder))
        )
        if not _folder_exists:
            _sh.do("mkdir " + _folder)
        logger.info("{} Syncing file".format(_log_name))
        _code, _r, _e = _sh.scp(
            _src,
            _sh.get_host_path(_tgt),
        )
        # handle error code
        if _code:
            logger_cli.warn(
                "{} Error in scp:\n"
                "\tstdout:'{}'\n"
                "\tstderr:'{}'".format(_log_name, _r, _e)
            )

        # execute script
        logger.debug("{} Running script".format(_log_name))
        _out = _sh.do(
            "python3 {}{}".format(
                _tgt,
                _args
            ),
            sudo=True
        )

        # forwarder is None for direct connections, so guard the kill
        if _fwd_sh:
            _fwd_sh.kill()
        _sh.kill()

        return [_name, _out]

    def execute_script_on_active_nodes(self, script_filename, args=[]):
        """Copy and run a packaged script on all active nodes in parallel
        using a worker thread pool (one forwarded port per worker).

        :return: dict of {node: output} for nodes that responded
        """
        # Prepare script
        _source_path = os.path.join(pkg_dir, 'scripts', script_filename)
        _target_path = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder,
            script_filename
        )
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        if _script_arguments:
            _script_arguments = " " + _script_arguments
        self.not_responded = []
        _results = {}
        logger_cli.debug(
            "...running '{}' on active nodes, {} worker threads".format(
                script_filename,
                self.env_config.threads
            )
        )
        # Workers pool
        pool = Pool(self.env_config.threads)

        # init the parameters
        # node_name,
        # src_path,
        # tgt_path,
        # conf,
        # args
        _params = []
        _port = 10022
        for node in self._active:
            # build parameter blocks
            _p_list = [
                node,
                _source_path,
                _target_path,
                self.env_config,
                _script_arguments,
                _port
            ]
            _params.append(_p_list)
            # each worker gets its own local forward port
            _port += 1

        _progress = Progress(len(_params))
        results = pool.imap_unordered(self._exec_script, _params)

        # ii is (index, [node_name, output])
        for ii in enumerate(results, start=1):
            if not ii[1][1]:
                self.not_responded.append(ii[1][0])
            else:
                _results[ii[1][0]] = ii[1][1]
            _progress.write_progress(ii[0])

        _progress.end()
        pool.close()
        pool.join()

        # return path on nodes, just in case
        return _results

    def prepare_json_on_node(self, node, _dict, filename):
        """Serialize ``_dict`` to JSON and scp it to the node's scripts
        folder, creating the folder when missing.

        :return: path of the file on the target node
        """
        # this function assumes that all folders are created
        _dumps = json.dumps(_dict, indent=2).splitlines()

        _source_path = create_temp_file_with_content(_dumps)
        _target_path = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder,
            filename
        )
        _folder = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder
        )
        # shell snippet: prints True when the path exists and is non-empty
        _check = "echo $(if [[ -s '{}' ]]; then echo True; " \
                 "else echo False; fi)"
        _fwd_sh, _sh = self.node_shell(
            node,
            use_sudo=False
        )

        # check if folder exists
        _folder_exists = utils.to_bool(
            _sh.do(_check.format(_folder))
        )
        if not _folder_exists:
            _sh.do("mkdir " + _folder)
        logger_cli.debug(
            "...create data on node '{}':'{}'".format(node, _target_path)
        )
        _code, _r, _e = _sh.scp(
            _source_path,
            _sh.get_host_path(_target_path),
        )
        # handle error code
        if _code:
            logger_cli.warn(
                "Error in scp:\n"
                "\tstdout:'{}'\n"
                "\tstderr:'{}'".format(_r, _e)
            )

        # NOTE(review): node_shell() returns None as the forwarder for
        # direct connections (is_local/ssh_direct); _exec_script guards
        # with `if _fwd_sh:` but this call does not — confirm.
        _fwd_sh.kill()
        _sh.kill()
        return _target_path
906 return _target_path