import json
import os
import yaml
from copy import deepcopy
from multiprocessing.dummy import Pool
from time import sleep

from cfg_checker.clients import get_salt_remote, get_kube_remote
from cfg_checker.common.const import all_salt_roles_map, all_kube_roles_map
from cfg_checker.common.const import NODE_UP, NODE_DOWN, NODE_SKIP
from cfg_checker.common.const import ubuntu_versions, nova_openstack_versions
from cfg_checker.common import logger, logger_cli
from cfg_checker.common import utils
from cfg_checker.common.file_utils import create_temp_file_with_content
from cfg_checker.common.exception import SaltException, KubeException
from cfg_checker.common.ssh_utils import PortForward, SshShell
from cfg_checker.common.settings import pkg_dir, ENV_TYPE_KUBE, ENV_TYPE_SALT
from cfg_checker.helpers.console_utils import Progress


node_tmpl = {
    'role': '',
    'node_group': '',
    'status': NODE_DOWN,
    'pillars': {},
    'grains': {},
    'raw': {}
}


def _prepare_skipped_nodes(_names, skip_list, skip_list_file):
    _skipped_minions = []
    # skip list file
    if skip_list_file:
        _valid, _invalid = utils.get_nodes_list(skip_list_file)
        _skipped_minions.extend(_valid)
        if len(_invalid) > 0:
            logger_cli.info(
                "\n# WARNING: Detected invalid entries "
                "in nodes skip list:\n{}\n".format(
                    "\n".join(_invalid)
                )
            )

    # process wildcards, create node list out of masks
    if skip_list:
        _list = []
        _invalid = []
        for _item in skip_list:
            if '*' in _item:
                _str = _item[:_item.index('*')]
                _nodes = [_m for _m in _names if _m.startswith(_str)]
                if not _nodes:
                    logger_cli.warn(
                        "# WARNING: No nodes found for {}".format(_item)
                    )
                _list.extend(_nodes)
            else:
                if _item in _names:
                    _list.append(_item)
                else:
                    logger_cli.warn(
                        "# WARNING: No node found for {}".format(_item)
                    )
        # removing duplicates
        _list = list(set(_list))
        _skipped_minions.extend(_list)

    return _skipped_minions
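
# Usage sketch for _prepare_skipped_nodes (hypothetical node names, for
# illustration only): a '*' mask expands by prefix match against the known
# names, while plain entries must match exactly.
#
#   _names = ["cmp001.local", "cmp002.local", "ctl01.local"]
#   _prepare_skipped_nodes(_names, ["cmp*", "ctl01.local"], None)
#   # -> ["cmp001.local", "cmp002.local", "ctl01.local"]
#   #    (order not guaranteed; duplicates removed)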


class Nodes(object):
    def __init__(self, config):
        self.nodes = None
        self.env_config = config

    def skip_node(self, node):
        # Add node to the skip list,
        # for example, if it fails to comply with the rules

        # check if we know such node
        if node in self.nodes.keys() and node not in self.skip_list:
            # yes, add it
            self.skip_list.append(node)
            return True
        else:
            return False

    def get_nodes(self, skip_list=None, skip_list_file=None):
        if not self.nodes:
            if not skip_list and self.env_config.skip_nodes:
                self.gather_node_info(
                    self.env_config.skip_nodes,
                    skip_list_file
                )
            else:
                self.gather_node_info(skip_list, skip_list_file)
        return self.nodes

    def get_info(self):
        _info = {
            'mcp_release': self.mcp_release,
            'openstack_release': self.openstack_release
        }
        return _info

    def is_node_available(self, node, log=True):
        if node in self.skip_list:
            if log:
                logger_cli.info("-> node '{}' not active".format(node))
            return False
        elif node in self.not_responded:
            if log:
                logger_cli.info("-> node '{}' not responded".format(node))
            return False
        else:
            return True


class SaltNodes(Nodes):
    def __init__(self, config):
        super(SaltNodes, self).__init__(config)
        logger_cli.info("# Gathering environment information")
        # simple salt rest client
        self.salt = None
        self.env_type = ENV_TYPE_SALT

    def gather_node_info(self, skip_list, skip_list_file):
        # Keys for all nodes
        # this does not work with 2016.8.3; will override with a list
        logger_cli.debug("... collecting node names existing in the cloud")
        if not self.salt:
            self.salt = get_salt_remote(self.env_config)

        try:
            _keys = self.salt.list_keys()
            _str = []
            for _k, _v in _keys.items():
                _str.append("{}: {}".format(_k, len(_v)))
            logger_cli.info("-> keys collected: {}".format(", ".join(_str)))

            self.node_keys = {
                'minions': _keys['minions']
            }
        except Exception:
            _keys = None
            self.node_keys = None

        # List of minions with grains
        _minions = self.salt.list_minions()
        if _minions:
            logger_cli.info(
                "-> api reported {} active minions".format(len(_minions))
            )
        elif not self.node_keys:
            # this is the last resort
            _minions = self.env_config.load_nodes_list()
            logger_cli.info(
                "-> {} nodes loaded from list file".format(len(_minions))
            )
        else:
            _minions = self.node_keys['minions']

        # Skip nodes if needed
        _skipped_minions = \
            _prepare_skipped_nodes(_minions, skip_list, skip_list_file)

        # in case the API did not list minions, use all that answer ping
        _active = self.salt.get_active_nodes()
        logger_cli.info("-> nodes responded: {}".format(len(_active)))
        # iterate through all accepted nodes and create a dict for each
        self.nodes = {}
        self.skip_list = []
        _domains = set()
        for _name in _minions:
            _nc = utils.get_node_code(_name)
            _rmap = all_salt_roles_map
            _role = _rmap[_nc] if _nc in _rmap else 'unknown'
            if _name in _skipped_minions:
                _status = NODE_SKIP
                self.skip_list.append(_name)
            else:
                _status = NODE_UP if _name in _active else NODE_DOWN
                if _status == NODE_DOWN:
                    self.skip_list.append(_name)
                    logger_cli.info(
                        "-> '{}' is down, "
                        "added to skip list".format(
                            _name
                        )
                    )
            self.nodes[_name] = deepcopy(node_tmpl)
            self.nodes[_name]['shortname'] = _name.split(".", 1)[0]
            _domains.add(_name.split(".", 1)[1])
            self.nodes[_name]['node_group'] = _nc
            self.nodes[_name]['role'] = _role
            self.nodes[_name]['status'] = _status
        _domains = list(_domains)
        if len(_domains) > 1:
            logger_cli.warning(
                "Multiple domains detected: {}".format(",".join(_domains))
            )
            # TODO: Use domain with biggest node count by default
            # or force it via config option
        else:
            self.domain = _domains[0]
        logger_cli.info("-> {} nodes inactive".format(len(self.skip_list)))
        logger_cli.info("-> {} nodes collected".format(len(self.nodes)))

        # form an all-nodes compound string to use in salt
        self.active_nodes_compound = self.salt.compound_string_from_list(
            filter(
                lambda nd: self.nodes[nd]['status'] == NODE_UP,
                self.nodes
            )
        )
        # get master node fqdn
        _role = all_salt_roles_map['cfg']
        _filtered = [n for n, v in self.nodes.items() if v['role'] == _role]
        if len(_filtered) < 1:
            raise SaltException(
                "No master node detected! Check/Update node role map."
            )
        else:
            self.salt.master_node = _filtered[0]

        # OpenStack versions
        self.mcp_release = self.salt.pillar_get(
            self.salt.master_node,
            "_param:apt_mk_version"
        )[self.salt.master_node]
        self.openstack_release = self.salt.pillar_get(
            self.salt.master_node,
            "_param:openstack_version"
        )[self.salt.master_node]
        # Preload codenames
        # do additional queries to get linux codename and arch for each node
        self.get_specific_pillar_for_nodes("_param:linux_system_codename")
        self.get_specific_pillar_for_nodes("_param:linux_system_architecture")
        for _name in self.nodes.keys():
            _n = self.nodes[_name]
            if _name not in self.skip_list:
                _p = _n['pillars']['_param']
                _n['linux_codename'] = _p['linux_system_codename']
                _n['linux_arch'] = _p['linux_system_architecture']
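
    # Resulting per-node entry sketch (hypothetical values), extending
    # node_tmpl:
    #   self.nodes["cmp001.domain.local"] = {
    #       'role': 'compute', 'node_group': 'cmp', 'status': NODE_UP,
    #       'shortname': 'cmp001', 'linux_codename': 'xenial',
    #       'linux_arch': 'amd64', 'pillars': {...}, 'grains': {}, 'raw': {}
    #   }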

    def get_cmd_for_nodes(self, cmd, target_key, target_dict=None, nodes=None):
        """Function runs cmd.run and parses the result into place
        or into the dict structure provided

        :return: no return value, data published internally
        """
        logger_cli.debug(
            "... collecting results for '{}'".format(cmd)
        )
        if target_dict:
            _nodes = target_dict
        else:
            _nodes = self.nodes
        _result = self.execute_cmd_on_active_nodes(cmd, nodes=nodes)
        for node, data in _nodes.items():
            if node in self.skip_list:
                logger_cli.debug(
                    "... '{}' skipped while collecting '{}'".format(
                        node,
                        cmd
                    )
                )
                continue
            # Prepare target key
            if target_key not in data:
                data[target_key] = None
            # Save data
            if data['status'] in [NODE_DOWN, NODE_SKIP]:
                data[target_key] = None
            elif node not in _result:
                continue
            elif not _result[node]:
                logger_cli.debug(
                    "... '{}' not responded after '{}'".format(
                        node,
                        self.env_config.salt_timeout
                    )
                )
                data[target_key] = None
            else:
                data[target_key] = _result[node]

    def get_specific_pillar_for_nodes(self, pillar_path):
        """Function gets pillars on the given path for all nodes

        :return: no return value, data published internally
        """
        logger_cli.debug(
            "... collecting node pillars for '{}'".format(pillar_path)
        )
        _result = self.salt.pillar_get(self.active_nodes_compound, pillar_path)
        self.not_responded = []
        for node, data in self.nodes.items():
            if node in self.skip_list:
                logger_cli.debug(
                    "... '{}' skipped while collecting '{}'".format(
                        node,
                        pillar_path
                    )
                )
                continue
            _pillar_keys = pillar_path.split(':')
            _data = data['pillars']
            # pre-create nested dict
            for idx in range(0, len(_pillar_keys)-1):
                _key = _pillar_keys[idx]
                if _key not in _data:
                    _data[_key] = {}
                _data = _data[_key]
            if data['status'] in [NODE_DOWN, NODE_SKIP]:
                _data[_pillar_keys[-1]] = None
            elif not _result[node]:
                logger_cli.debug(
                    "... '{}' not responded after '{}'".format(
                        node,
                        self.env_config.salt_timeout
                    )
                )
                _data[_pillar_keys[-1]] = None
                self.not_responded.append(node)
            else:
                _data[_pillar_keys[-1]] = _result[node]
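
    # Illustration: for pillar_path "_param:linux_system_codename" the loop
    # above pre-creates data['pillars']['_param'] and then stores the
    # per-node result as data['pillars']['_param']['linux_system_codename'].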

    def prepare_json_on_node(self, node, _dict, filename):
        if node in self.skip_list:
            logger_cli.debug(
                "... '{}' skipped while preparing json file of '{}'".format(
                    node,
                    filename
                )
            )
            return None

        # this function assumes that all folders are created
        _dumps = json.dumps(_dict, indent=2).splitlines()
        _storage_path = os.path.join(
            self.env_config.salt_file_root, self.env_config.salt_scripts_folder
        )
        logger_cli.debug(
            "... uploading data as '{}' "
            "to master's file cache folder: '{}'".format(
                filename,
                _storage_path
            )
        )
        _cache_path = os.path.join(_storage_path, filename)
        _source_path = os.path.join(
            'salt://',
            self.env_config.salt_scripts_folder,
            filename
        )
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            filename
        )

        logger_cli.debug("... creating file in cache '{}'".format(_cache_path))
        self.salt.f_touch_master(_cache_path)
        self.salt.f_append_master(_cache_path, _dumps)
        logger.debug("... syncing file to '{}'".format(node))
        self.salt.get_file(
            node,
            _source_path,
            _target_path,
            tgt_type="compound"
        )
        return _target_path

    def prepare_script_on_active_nodes(self, script_filename):
        # Prepare script
        _p = os.path.join(pkg_dir, 'scripts', script_filename)
        with open(_p, 'rt') as fd:
            _script = fd.read().splitlines()
        _storage_path = os.path.join(
            self.env_config.salt_file_root, self.env_config.salt_scripts_folder
        )
        logger_cli.debug(
            "... uploading script {} "
            "to master's file cache folder: '{}'".format(
                script_filename,
                _storage_path
            )
        )
        self.salt.mkdir(self.salt.master_node, _storage_path)
        # Form cache, source and target path
        _cache_path = os.path.join(_storage_path, script_filename)
        _source_path = os.path.join(
            'salt://',
            self.env_config.salt_scripts_folder,
            script_filename
        )
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            script_filename
        )

        logger_cli.debug("... creating file in cache '{}'".format(_cache_path))
        self.salt.f_touch_master(_cache_path)
        self.salt.f_append_master(_cache_path, _script)
        # command salt to copy file to minions
        logger_cli.debug(
            "... creating script target folder '{}'".format(
                _cache_path
            )
        )
        self.salt.mkdir(
            self.active_nodes_compound,
            os.path.join(
                '/root',
                self.env_config.salt_scripts_folder
            ),
            tgt_type="compound"
        )
        logger.debug("... syncing file to nodes")
        self.salt.get_file(
            self.active_nodes_compound,
            _source_path,
            _target_path,
            tgt_type="compound"
        )
        # return path on nodes, just in case
        return _target_path
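
    # Path convention used above: the script is cached on the master at
    # <salt_file_root>/<salt_scripts_folder>/<name>, served to minions as
    # salt://<salt_scripts_folder>/<name>, and lands on each node at
    # /root/<salt_scripts_folder>/<name>.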

    def execute_script_on_node(self, node, script_filename, args=None):
        # Prepare path
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            script_filename
        )

        # execute script
        logger.debug("... running script on '{}'".format(node))
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        self.not_responded = []
        _r = self.salt.cmd(
            node,
            'cmd.run',
            param='python {} {}'.format(_target_path, _script_arguments),
            expr_form="compound"
        )

        # all False returns mean there was no response
        self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
        return _r

    def execute_script_on_active_nodes(self, script_filename, args=None):
        # Prepare path
        _target_path = os.path.join(
            '/root',
            self.env_config.salt_scripts_folder,
            script_filename
        )

        # execute script
        logger_cli.debug("... running script")
        # handle results for each node
        _script_arguments = args if args else ""
        self.not_responded = []
        _r = self.salt.cmd(
            self.active_nodes_compound,
            'cmd.run',
            param='python {} {}'.format(_target_path, _script_arguments),
            expr_form="compound"
        )

        # all False returns mean there was no response
        self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
        return _r

    def execute_cmd_on_active_nodes(self, cmd, nodes=None):
        # execute cmd
        self.not_responded = []
        _r = self.salt.cmd(
            nodes if nodes else self.active_nodes_compound,
            'cmd.run',
            param=cmd,
            expr_form="compound"
        )

        # all False returns mean there was no response
        self.not_responded = [_n for _n in _r.keys() if not _r[_n]]
        return _r
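
    # Usage sketch (hypothetical command): collect kernel versions from all
    # active minions; nodes that return an empty result are recorded in
    # self.not_responded.
    #   _r = salt_nodes.execute_cmd_on_active_nodes("uname -r")
    #   # -> {"cmp001.domain.local": "4.15.0-112-generic", ...}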


class KubeNodes(Nodes):
    def __init__(self, config):
        super(KubeNodes, self).__init__(config)
        logger_cli.info("# Gathering environment information")
        # simple kube remote client
        self.kube = get_kube_remote(self.env_config)
        self.env_type = ENV_TYPE_KUBE
        self._namespace = "qa-space"
        self._configmap_name = self.env_config.kube_scripts_folder

        # prepare needed resources
        self._check_namespace()
        self._scripts = self._check_config_map()
        self.prepared_daemonsets = []

    def _check_namespace(self):
        # ensure namespace
        logger_cli.debug(
            "... checking namespace '{}'".format(self._namespace)
        )
        if not self.kube.ensure_namespace(self._namespace):
            raise KubeException(
                "Failed to manage namespace '{}'".format(self._namespace)
            )

    def _check_config_map(self):
        # ensure config map exists
        logger_cli.debug(
            "... checking config map '{}'".format(self._configmap_name)
        )
        _source = os.path.join(pkg_dir, 'scripts')
        return self.kube.create_config_map(
            self._namespace,
            self._configmap_name,
            _source
        )

    def gather_node_info(self, skip_list, skip_list_file):
        # Gather nodes info and query pod lists for each node
        logger_cli.debug("... collecting node names existing in the cloud")

        # Gather node names and info
        _nodes = self.kube.get_node_info()
        _node_names = list(_nodes.keys())
        # Skip nodes if needed
        _skipped_nodes = \
            _prepare_skipped_nodes(_node_names, skip_list, skip_list_file)

        # Count how many nodes are active
        self._active = [n for n, v in _nodes.items()
                        if v['conditions']['ready']['status']]

        # iterate through all accepted nodes and create a dict for each
        self.nodes = {}
        self.skip_list = []
        for _name in _node_names:
            if _name in _skipped_nodes:
                _status = NODE_SKIP
                self.skip_list.append(_name)
            else:
                _status = NODE_UP if _name in self._active else NODE_DOWN
                if _status == NODE_DOWN:
                    self.skip_list.append(_name)
                    logger_cli.info(
                        "-> '{}' shows 'Ready' as 'False', "
                        "added to skip list".format(
                            _name
                        )
                    )
            _roles = {}
            _labels = {}
            for _label, _value in _nodes[_name]['labels'].items():
                if _label in all_kube_roles_map:
                    _roles[all_kube_roles_map[_label]] = _value
                else:
                    _labels[_label] = _value

            self.nodes[_name] = deepcopy(node_tmpl)
            self.nodes[_name].pop("grains")
            self.nodes[_name].pop("pillars")

            # hostname
            self.nodes[_name]['shortname'] = \
                _nodes[_name]['addresses']['hostname']['address']
            self.nodes[_name]['internalip'] = \
                _nodes[_name]['addresses']['internalip']['address']
            self.nodes[_name]['node_group'] = None
            self.nodes[_name]['labels'] = _labels
            self.nodes[_name]['roles'] = _roles
            self.nodes[_name]['status'] = _status
            # Backward compatibility
            _info = _nodes[_name]['status']['node_info']
            self.nodes[_name]['linux_image'] = _info['os_image']
            self.nodes[_name]['linux_arch'] = _info['architecture']

            _codename = "unknown"
            _n, _v, _c = _info['os_image'].split()
            if _n.lower() == 'ubuntu':
                _v, _, _ = _v.rpartition('.') if '.' in _v else (_v, "", "")
                if _v in ubuntu_versions:
                    _codename = ubuntu_versions[_v].split()[0].lower()
            self.nodes[_name]['linux_codename'] = _codename

            # Consider per-data type transfer
            self.nodes[_name]["raw"] = _nodes[_name]
        # TODO: Investigate how to handle domains in Kube, probably - skip
        # _domains = list(_domains)
        # if len(_domains) > 1:
        #     logger_cli.warning(
        #         "Multiple domains detected: {}".format(",".join(_domains))
        #     )
        # else:
        self.domain = "no.domain.in.kube.yet"
        logger_cli.info(
            "-> {} nodes collected: {} - active, {} - not active".format(
                len(self.nodes),
                len(self._active),
                len(self.skip_list)
            )
        )

        _role = "k8s-master"
        _filtered = [n for n, v in self.nodes.items() if _role in v['roles']]
        if len(_filtered) < 1:
            raise KubeException(
                "No k8s-master nodes detected! Check/Update node role map."
            )
        else:
            _r = [n for n, v in self.nodes.items()
                  if v['status'] != NODE_UP and _role in v['roles']]
            if len(_r) > 0:
                logger_cli.warn(
                    "Master nodes are reporting 'NotReady':\n{}".format(
                        "\n".join(_r)
                    )
                )
            self.kube.master_node = _filtered[0]

        # get specific data upfront
        # OpenStack versions
        self.mcp_release = ""
        # Quick and dirty way to detect OS release
        try:
            _nova_version = self.kube.exec_on_target_pod(
                "nova-manage --version",
                "nova-api-osapi",
                "openstack"
            )
            _nmajor = _nova_version.partition('.')[0]
            self.openstack_release = nova_openstack_versions[_nmajor]
        except KubeException as e:
            logger_cli.warn("Openstack not detected: {}".format(e.message))
            self.openstack_release = nova_openstack_versions["00"]

        return
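
    # Version detection sketch: if "nova-manage --version" prints "21.2.4",
    # the major part "21" is looked up in nova_openstack_versions to name
    # the release; "00" is the fallback entry when OpenStack is not present.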

    @staticmethod
    def _get_ssh_shell(_h, _u, _k, _p, _q, _pipe, timeout=15):
        _ssh = SshShell(
            _h,
            user=_u,
            keypath=_k,
            port=_p,
            silent=_q,
            piped=_pipe,
            timeout=timeout
        )
        return _ssh.connect()

    @staticmethod
    def _do_ssh_cmd(_cmd, _h, _u, _k, _p, _q, _pipe, timeout=None):
        with SshShell(
            _h,
            user=_u,
            keypath=_k,
            port=_p,
            silent=_q,
            piped=_pipe
        ) as ssh:
            if timeout is None:
                _r = ssh.do(_cmd)
            else:
                _r = ssh.do(_cmd, timeout=timeout)
            logger_cli.debug("'{}'".format(_r))
            return _r

    def node_shell(
        self,
        node,
        silent=True,
        piped=True,
        use_sudo=True,
        fport=None
    ):
        _u = self.env_config.kube_node_user
        _k = self.env_config.kube_node_keypath
        _h = self.nodes[node]['internalip']
        _p = 22
        if self.kube.is_local or self.kube.config.ssh_direct:
            logger.debug("Getting shell with no port forward")
            return [None, self._get_ssh_shell(
                _h, _u, _k, _p, silent, piped,
                timeout=self.kube.config.ssh_connect_timeout
            )]
        else:
            logger.debug("Getting shell with port forward")
            _fh = "localhost"
            _p = 10022 if not fport else fport
            _pfwd = PortForward(
                self.env_config.ssh_host,
                _h,
                user=_u,
                keypath=self.env_config.ssh_key,
                loc_port=_p,
                timeout=self.kube.config.ssh_connect_timeout
            )
            _pfwd.connect()
            _ssh = self._get_ssh_shell(
                _fh,
                _u,
                _k,
                _p,
                silent,
                piped,
                timeout=self.kube.config.ssh_connect_timeout
            )
            return [_pfwd, _ssh]
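
    # Usage sketch: callers get back [port_forward_or_None, shell] and are
    # responsible for closing both, e.g.
    #   _fwd, _sh = kube_nodes.node_shell("cmp001")
    #   _out = _sh.do("uname -r")
    #   if _fwd:
    #       _fwd.kill()
    #   _sh.kill()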

    def execute_script_on_node(self, node, script_filename, args=None):
        # Prepare path
        _target_path = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder,
            script_filename
        )

        # execute script
        logger_cli.debug("... running script on '{}'".format(node))
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        self.not_responded = []
        # get a shell and run the script to get the result
        _fwd_sh, _sh = self.node_shell(node)
        _nr = _sh.do(
            "python {} {}".format(
                _target_path,
                _script_arguments
            )
        )
        if _fwd_sh:
            _fwd_sh.kill()
        _sh.kill()

        if not _nr:
            self.not_responded.append(node)
            return {}
        else:
            return {node: _nr}

    def execute_cmd_on_active_nodes(self, cmd, nodes=None):
        # execute cmd
        logger_cli.debug("... running '{}' on active nodes".format(cmd))
        # handle results for each node
        self.not_responded = []
        _r = {}
        # TODO: Use threading and pool
        for node in self._active:
            _fwd_sh, _sh = self.node_shell(node)
            _nr = _sh.do(cmd)
            if _fwd_sh:
                _fwd_sh.kill()
            _sh.kill()

            if not _nr:
                self.not_responded.append(node)
            else:
                _r[node] = _nr

        return _r

    def _ssh_exec_script(self, params):
        """
        Threadsafe method to get a shell to a node,
        check/copy the script and get results
        [
            node_name,
            src_path,
            tgt_path,
            conf,
            args,
            port
        ]
        """
        _timeout = self.kube.config.script_execution_timeout
        _name = params[0]
        _src = params[1]
        _tgt = params[2]
        _conf = params[3]
        _args = params[4]
        _port = params[5]
        _log_name = "["+_name+"]:"
        _check = "echo $(if [[ -s '{}' ]]; then echo True; " \
                 "else echo False; fi)"
        _fwd_sh, _sh = self.node_shell(
            _name,
            use_sudo=False,
            fport=_port
        )
        # check python3
        _python = _sh.do("which python3")
        _python = utils.to_bool(
            _sh.do(_check.format(_python))
        )
        if not _python:
            _sh.do("apt install -y python3", sudo=True, timeout=_timeout)
        # check if script is already there
        _folder = os.path.join(
            self.env_config.kube_node_homepath,
            _conf.kube_scripts_folder
        )
        # check if folder exists
        _folder_exists = utils.to_bool(
            _sh.do(_check.format(_folder))
        )
        if not _folder_exists:
            _sh.do("mkdir " + _folder)
        logger.info("{} Syncing file".format(_log_name))
        _code, _r, _e = _sh.scp(
            _src,
            _sh.get_host_path(_tgt),
        )
        # handle error code
        if _code:
            logger_cli.warn(
                "{} Error in scp:\n"
                "\tstdout:'{}'\n"
                "\tstderr:'{}'".format(_log_name, _r, _e)
            )

        # execute script
        logger.debug("{} Running script".format(_log_name))
        _out = _sh.do(
            "python3 {}{}".format(
                _tgt,
                _args
            ),
            sudo=True,
            timeout=_timeout
        )

        if _fwd_sh:
            _fwd_sh.kill()
        _sh.kill()

        return [_name, _out]
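
    # Parameter block sketch (hypothetical values) matching the unpacking
    # above:
    #   ["cmp001", "<pkg_dir>/scripts/script.py",
    #    "<homepath>/<scripts_folder>/script.py", env_config, " --arg", 10022]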

    def execute_script_on_active_nodes(self, script_filename, args=None):
        # Prepare script
        _source_path = os.path.join(pkg_dir, 'scripts', script_filename)
        _target_path = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder,
            script_filename
        )
        # handle results for each node
        _script_arguments = " ".join(args) if args else ""
        if _script_arguments:
            _script_arguments = " " + _script_arguments
        self.not_responded = []
        _results = {}
        logger_cli.debug(
            "... running '{}' on active nodes, {} worker threads".format(
                script_filename,
                self.env_config.threads
            )
        )
        # Workers pool
        pool = Pool(self.env_config.threads)

        # init the parameters
        # node_name,
        # src_path,
        # tgt_path,
        # conf,
        # args
        _params = []
        _port = 10022
        for node in self._active:
            # build parameter blocks
            _p_list = [
                node,
                _source_path,
                _target_path,
                self.env_config,
                _script_arguments,
                _port
            ]
            _params.append(_p_list)
            _port += 1

        _progress = Progress(len(_params))
        results = pool.imap_unordered(self._ssh_exec_script, _params)

        for ii in enumerate(results, start=1):
            if not ii[1][1]:
                self.not_responded.append(ii[1][0])
            else:
                _results[ii[1][0]] = ii[1][1]
            _progress.write_progress(ii[0])

        _progress.end()
        pool.close()
        pool.join()

        # return collected results
        return _results
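
    # Usage sketch (hypothetical script name): results come back unordered,
    # keyed by node name.
    #   _res = kube_nodes.execute_script_on_active_nodes(
    #       "versions.py", args=["--json"]
    #   )
    #   # -> {"cmp001": "<script stdout>", ...}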

    def prepare_json_on_node(self, node, _dict, filename):
        # this function assumes that all folders are created
        _dumps = json.dumps(_dict, indent=2).splitlines()

        _source_path = create_temp_file_with_content(_dumps)
        _target_path = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder,
            filename
        )
        _folder = os.path.join(
            self.env_config.kube_node_homepath,
            self.env_config.kube_scripts_folder
        )
        _check = "echo $(if [[ -s '{}' ]]; then echo True; " \
                 "else echo False; fi)"
        _fwd_sh, _sh = self.node_shell(
            node,
            use_sudo=False
        )

        # check if folder exists
        _folder_exists = utils.to_bool(
            _sh.do(_check.format(_folder))
        )
        if not _folder_exists:
            _sh.do("mkdir " + _folder)
        logger_cli.debug(
            "... create data on node '{}':'{}'".format(node, _target_path)
        )
        _code, _r, _e = _sh.scp(
            _source_path,
            _sh.get_host_path(_target_path),
        )
        # handle error code
        if _code:
            logger_cli.warn(
                "Error in scp:\n"
                "\tstdout:'{}'\n"
                "\tstderr:'{}'".format(_r, _e)
            )

        if _fwd_sh:
            _fwd_sh.kill()
        _sh.kill()
        return _target_path

    def prepare_daemonset(self, template_filename):
        # load template
        _yaml_file = os.path.join(pkg_dir, 'templates', template_filename)
        logger_cli.debug("... loading template '{}'".format(_yaml_file))
        _ds = {}
        with open(_yaml_file) as dsFile:
            _ds = yaml.load(dsFile, Loader=yaml.SafeLoader)

        # Add scripts to pod template as volumeMounts
        _tspec = _ds['spec']['template']['spec']
        _tspec['containers'][0]['volumeMounts'] = [
            {
                "name": "scripts",
                "mountPath": os.path.join(
                    "/",
                    self.env_config.kube_scripts_folder
                )
            }
        ]

        _tspec['volumes'] = [
            {
                "name": "scripts",
                "configMap": {
                    "name": self._configmap_name
                }
            }
        ]

        # create daemonset
        logger_cli.debug("... preparing daemonset")
        _ds = self.kube.prepare_daemonset_from_yaml(self._namespace, _ds)
        # Save prepared daemonset
        self.prepared_daemonsets.append(_ds)
        # return it
        return _ds
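
    # Resulting pod spec fragment (sketch): the scripts configmap is mounted
    # into each pod of the daemonset:
    #   spec.template.spec.containers[0].volumeMounts:
    #     - name: scripts
    #       mountPath: /<kube_scripts_folder>
    #   spec.template.spec.volumes:
    #     - name: scripts
    #       configMap: {name: <configmap_name>}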

    def wait_for_daemonset(self, ds, timeout=120):
        # iteration timeout
        _sleep_time = 5
        _timeout = timeout

        # query the daemonset and check that desired=scheduled=ready
        _ds = self.kube.get_daemon_set_by_name(
            ds.metadata.namespace,
            ds.metadata.name
        )

        _total = len(self.nodes)
        # Init progress bar to show daemonset readiness
        _progress = Progress(_total)
        while _timeout > 0:
            # get new status
            _ds = self.kube.get_daemon_set_by_name(
                ds.metadata.namespace,
                ds.metadata.name
            )
            _desired = _ds.status.desired_number_scheduled
            _scheduled = _ds.status.current_number_scheduled
            _ready = _ds.status.number_ready
            _updated = _ds.status.updated_number_scheduled
            # print it
            _progress.write_progress(
                _ready,
                note="desired: {}, scheduled: {}, ready: {},"
                     " up-to-date: {}".format(
                         _desired,
                         _scheduled,
                         _ready,
                         _updated
                     )
            )

            # check values and return
            # In case of an update, also check the _updated value
            if _ready == _updated and _ready == _total:
                # close progress bar class
                _progress.end()
                logger_cli.debug("... daemonset is ready")
                return True
            # iterate
            _timeout -= _sleep_time
            # wait
            sleep(_sleep_time)

        # timed out
        _progress.end()
        # log it
        logger_cli.error("Timed out waiting for Daemonset to be ready")
        return False
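
    # Daemonset lifecycle sketch (hypothetical template name):
    #   _ds = kube_nodes.prepare_daemonset("ds_template.yaml")
    #   if kube_nodes.wait_for_daemonset(_ds):
    #       _out = kube_nodes.execute_cmd_on_daemon_set(_ds, "uname -r")
    #       kube_nodes.delete_daemonset(_ds)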

    def exec_script_on_target_pod(self, pod_name, script_filename, args=None):
        """
        Run a script from the configmap on the target pod,
        assuming it is present
        """
        _arguments = args if args else []
        _cmd = [
            "python3",
            os.path.join(
                "/",
                self.env_config.kube_scripts_folder,
                script_filename
            )
        ] + _arguments
        _result = self.kube.exec_on_target_pod(
            _cmd,
            pod_name,
            self._namespace,
            strict=True
        )
        return _result

    def exec_cmd_on_target_pod(self, pod_name, ns, command_str):
        """
        Run a command on the target pod, assuming it is present
        """
        _result = self.kube.exec_on_target_pod(
            command_str,
            pod_name,
            ns,
            strict=True
        )
        return _result

    def execute_cmd_on_daemon_set(
        self,
        ds,
        cmd,
        args=None,
        is_script=False
    ):
        """
        Query the daemonset for pods and execute the command on all of them
        """
        def _kube_exec_on_pod(plist):
            return [
                plist[1],  # node
                plist[3],  # pod name
                plist[0].kube.exec_on_target_pod(  # pointer to function
                    plist[4],  # cmd
                    plist[3],  # pod name
                    plist[2],  # namespace
                    strict=True,
                    _request_timeout=120,
                    arguments=plist[5]
                )
            ]

        _pods = self.kube.get_pods_for_daemonset(ds)
        # Create map for threads: [[self, node_name, ns, pod_name, cmd,
        # args]...]
        logger_cli.debug(
            "... running script on {} pods using {} threads at a time".format(
                len(_pods.items),
                self.env_config.threads
            )
        )
        _plist = []
        _arguments = args if args else ""
        if is_script:
            _cmd = [
                "python3",
                os.path.join(
                    "/",
                    self.env_config.kube_scripts_folder,
                    cmd
                ),
                _arguments
            ]
            _cmd = " ".join(_cmd)
        else:
            # decide if we are to wrap it in bash
            if '|' in cmd:
                _cmd = "bash -c"
                _arguments = cmd
            else:
                _cmd = cmd
        for item in _pods.items:
            _plist.append(
                [
                    self,
                    item.spec.node_name,
                    item.metadata.namespace,
                    item.metadata.name,
                    _cmd,
                    _arguments
                ]
            )

        # map func and cmd
        pool = Pool(self.env_config.threads)
        _results = {}
        self.not_responded = []
        # create result list
        _progress = Progress(len(_plist))
        ret = pool.imap_unordered(_kube_exec_on_pod, _plist)

        for ii in enumerate(ret, start=1):
            if not ii[1][2]:
                self.not_responded.append(ii[1][0])
            else:
                _results[ii[1][0]] = ii[1][2]
            _progress.write_progress(ii[0])

        _progress.end()
        pool.close()
        pool.join()
        logger_cli.debug(
            "... done, {} total outputs; {} not responded".format(
                len(_results),
                len(self.not_responded)
            )
        )
        return _results

    def delete_daemonset(self, ds):
        # Try to delete the daemonset
        try:
            _r = self.kube.delete_daemon_set_by_name(
                ds.metadata.namespace,
                ds.metadata.name
            )
        except Exception as e:
            logger_cli.warning("Failed to delete daemonset '{}': {}".format(
                ds.metadata.name,
                e.reason
            ))
            _r = None
        return _r

    def get_pod_name_in_daemonset_by_node(self, nodename, daemonset):
        _podname = None
        _pods = self.kube.get_pods_for_daemonset(daemonset)
        for item in _pods.items:
            if item.spec.node_name == nodename:
                _podname = item.metadata.name

        return _podname

    def prepare_json_in_pod(self, podname, namespace, targets, filename):
        # Prepare the json file inside the given pod
        _target_path = os.path.join(
            "/",
            "tmp",
            filename
        )
        # checking the folder is probably not needed, as the daemonset
        # links the configmap there on creation
        # prepare data
        buffer = json.dumps(targets, indent=2).encode('utf-8')

        # write data to pod using fancy websocket function
        self.kube.put_string_buffer_to_pod_as_textfile(
            podname,
            namespace,
            buffer,
            _target_path
        )

        # TODO: Exception handling

        return _target_path

    def get_cmd_for_nodes(self, cmd, target_key, target_dict=None, nodes=None):
        """Function runs command on the daemonset and parses the result
        into place or into the dict structure provided

        :return: no return value, data published internally
        """
        logger_cli.debug(
            "... collecting results for '{}'".format(cmd)
        )
        if target_dict:
            _nodes = target_dict
        else:
            _nodes = self.nodes
        # Dirty way to get a daemonset that was used in the checker
        # and not deleted
        _ds = self.prepared_daemonsets[0]
        _result = self.execute_cmd_on_daemon_set(_ds, cmd)
        for node, data in _nodes.items():
            if node in self.skip_list:
                logger_cli.debug(
                    "... '{}' skipped while collecting '{}'".format(
                        node,
                        cmd
                    )
                )
                continue
            # Prepare target key
            if target_key not in data:
                data[target_key] = None
            # Save data
            if data['status'] in [NODE_DOWN, NODE_SKIP]:
                data[target_key] = None
            elif node not in _result:
                continue
            elif not _result[node]:
                logger_cli.debug(
                    "... '{}' not responded after '{}'".format(
                        node,
                        self.env_config.salt_timeout
                    )
                )
                data[target_key] = None
            else:
                data[target_key] = _result[node]