Alex | 0989ecf | 2022-03-29 13:43:21 -0500 | [diff] [blame] | 1 | # Author: Alex Savatieiev (osavatieiev@mirantis.com; a.savex@gmail.com) |
| 2 | # Copyright 2019-2022 Mirantis, Inc. |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 3 | """ |
| 4 | Module to handle interaction with Kube |
| 5 | """ |
| 6 | import base64 |
| 7 | import os |
| 8 | import urllib3 |
| 9 | import yaml |
| 10 | |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 11 | from kubernetes import client as kclient, config as kconfig, watch |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 12 | from kubernetes.stream import stream |
Alex | 7b0ee9a | 2021-09-21 17:16:17 -0500 | [diff] [blame] | 13 | from kubernetes.client.rest import ApiException |
Alex | 0bcf31b | 2022-03-29 17:38:58 -0500 | [diff] [blame] | 14 | from urllib3.exceptions import MaxRetryError |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 15 | from time import time, sleep |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 16 | |
| 17 | from cfg_checker.common import logger, logger_cli |
Alex | 7b0ee9a | 2021-09-21 17:16:17 -0500 | [diff] [blame] | 18 | from cfg_checker.common.decorators import retry |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 19 | from cfg_checker.common.exception import CheckerException, \ |
| 20 | InvalidReturnException, KubeException |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 21 | from cfg_checker.common.file_utils import create_temp_file_with_content |
| 22 | from cfg_checker.common.other import utils, shell |
| 23 | from cfg_checker.common.ssh_utils import ssh_shell_p |
Alex | 359e575 | 2021-08-16 17:28:30 -0500 | [diff] [blame] | 24 | from cfg_checker.common.const import ENV_LOCAL |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 25 | |
Alex | 7b0ee9a | 2021-09-21 17:16:17 -0500 | [diff] [blame] | 26 | |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 27 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) |
| 28 | |
| 29 | |
def _init_kube_conf_local(config):
    """Initialize the Kube client from a local kubeconfig file.

    :param config: checker config object; uses `kube_config_path` and
                   `insecure`
    :return: tuple of (kube config module, ApiClient, path label);
             (None, None, path label) when initialization fails
    """
    # Init kube library locally
    _path = "local:{}".format(config.kube_config_path)
    try:
        kconfig.load_kube_config(config_file=config.kube_config_path)
        if config.insecure:
            # NOTE(review): these flags are set on the kconfig *module*,
            # not on a Configuration instance — confirm this actually
            # relaxes TLS/validation as intended
            kconfig.assert_hostname = False
            kconfig.client_side_validation = False
        logger_cli.debug(
            "... found Kube env: core, {}". format(
                ",".join(
                    kclient.CoreApi().get_api_versions().versions
                )
            )
        )
        return kconfig, kclient.ApiClient(), _path
    except Exception as e:
        # Fix: Logger.warn is a deprecated alias of Logger.warning
        logger.warning("Failed to init local Kube client: {}".format(
            str(e)
            )
        )
        return None, None, _path
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 52 | |
| 53 | |
def _init_kube_conf_remote(config):
    # init remote client
    # Preload Kube token
    """
    APISERVER=$(kubectl config view --minify |
    grep server | cut -f 2- -d ":" | tr -d " ")
    SECRET_NAME=$(kubectl get secrets |
    grep ^default | cut -f1 -d ' ')
    TOKEN=$(kubectl describe secret $SECRET_NAME |
    grep -E '^token' | cut -f2 -d':' | tr -d " ")

    echo "Detected API Server at: '${APISERVER}'"
    echo "Got secret: '${SECRET_NAME}'"
    echo "Loaded token: '${TOKEN}'"

    curl $APISERVER/api
    --header "Authorization: Bearer $TOKEN" --insecure
    """
    # Fix: removed redundant function-local `import yaml`;
    # yaml is already imported at module level
    _path = ''
    # Try to load remote config only if it was not detected already
    if not config.kube_config_detected and not config.env_name == ENV_LOCAL:
        # fetch kubeconfig over SSH from the remote host
        _path = "{}@{}:{}".format(
            config.ssh_user,
            config.ssh_host,
            config.kube_config_path
        )
        _c_data = ssh_shell_p(
            "cat " + config.kube_config_path,
            config.ssh_host,
            username=config.ssh_user,
            keypath=config.ssh_key,
            piped=False,
            use_sudo=config.ssh_uses_sudo,
        )
    else:
        # kubeconfig already present locally
        _path = "local:{}".format(config.kube_config_path)
        with open(config.kube_config_path, 'r') as ff:
            _c_data = ff.read()

    if len(_c_data) < 1:
        # nothing to parse — signal failure to the caller
        return None, None, _path

    _conf = yaml.load(_c_data, Loader=yaml.SafeLoader)

    _kube_conf = kclient.Configuration()
    # A remote host configuration

    # To work with remote cluster, we need to extract these
    # keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl']
    # When v12 of the client is released, we will use load_from_dict

    # Decode embedded certs/keys into temp files the client can read
    _kube_conf.ssl_ca_cert = create_temp_file_with_content(
        base64.standard_b64decode(
            _conf['clusters'][0]['cluster']['certificate-authority-data']
        )
    )
    _host = _conf['clusters'][0]['cluster']['server']
    _kube_conf.cert_file = create_temp_file_with_content(
        base64.standard_b64decode(
            _conf['users'][0]['user']['client-certificate-data']
        )
    )
    _kube_conf.key_file = create_temp_file_with_content(
        base64.standard_b64decode(
            _conf['users'][0]['user']['client-key-data']
        )
    )
    # sanity check: expect an https URL with a 443-based port
    if "http" not in _host or "443" not in _host:
        logger_cli.error(
            "Failed to extract Kube host: '{}'".format(_host)
        )
    else:
        logger_cli.debug(
            "... 'context' host extracted: '{}' via SSH@{}".format(
                _host,
                config.ssh_host
            )
        )

    # Substitute context host to ours
    # assumes _host looks like "https://<addr>:<port>" — TODO confirm
    _tmp = _host.split(':')
    _kube_conf.host = \
        _tmp[0] + "://" + config.mcp_host + ":" + _tmp[2]
    config.kube_port = _tmp[2]
    logger_cli.debug(
        "... kube remote host updated to {}".format(
            _kube_conf.host
        )
    )
    _kube_conf.verify_ssl = False
    _kube_conf.debug = config.debug
    if config.insecure:
        _kube_conf.assert_hostname = False
        _kube_conf.client_side_validation = False

    # Nevertheless if you want to do it
    # you can with these 2 parameters
    # configuration.verify_ssl=True
    # ssl_ca_cert is the filepath
    # to the file that contains the certificate.
    # configuration.ssl_ca_cert="certificate"

    # _kube_conf.api_key = {
    #     "authorization": "Bearer " + config.kube_token
    # }

    # Create a ApiClient with our config
    _kube_api = kclient.ApiClient(_kube_conf)

    return _kube_conf, _kube_api, _path
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 165 | |
| 166 | |
class KubeApi(object):
    """Base wrapper around the Kubernetes python client.

    Chooses local or remote kubeconfig handling based on
    `config.env_name` and stores the resulting configuration,
    ApiClient and a human-readable path label.
    """
    def __init__(self, config):
        # checker-wide config object (env name, ssh params, kube paths)
        self.config = config
        # True when a usable kube config + API client pair was created
        self.initialized = self._init_kclient()
        self.last_response = None

    def _init_kclient(self):
        """Init kube config/client; return True on success, False otherwise.

        Side effects: sets self.kConf, self.kApi, self.kConfigPath,
        self.is_local; for local envs with a readable kubeconfig also
        sets self.user_keypath and self.yaml_conf.
        """
        # if there is no password - try to get local, if this available
        logger_cli.debug("... init kube config")
        if self.config.env_name == "local":
            self.kConf, self.kApi, self.kConfigPath = _init_kube_conf_local(
                self.config
            )
            self.is_local = True
            # Try to load local config data
            if self.config.kube_config_path and \
                    os.path.exists(self.config.kube_config_path):
                _cmd = "cat " + self.config.kube_config_path
                _c_data = shell(_cmd)
                _conf = yaml.load(_c_data, Loader=yaml.SafeLoader)
                # extract the client key into a temp file for later use
                self.user_keypath = create_temp_file_with_content(
                    base64.standard_b64decode(
                        _conf['users'][0]['user']['client-key-data']
                    )
                )
                self.yaml_conf = _c_data
        else:
            self.kConf, self.kApi, self.kConfigPath = _init_kube_conf_remote(
                self.config
            )
            self.is_local = False

        if self.kConf is None or self.kApi is None:
            return False
        else:
            return True

    def get_versions_api(self):
        """Return a VersionApi client bound to the initialized ApiClient."""
        # client.CoreApi().get_api_versions().versions
        return kclient.VersionApi(self.kApi)
| 207 | |
| 208 | |
| 209 | class KubeRemote(KubeApi): |
| 210 | def __init__(self, config): |
| 211 | super(KubeRemote, self).__init__(config) |
Alex | 1f90e7b | 2021-09-03 15:31:28 -0500 | [diff] [blame] | 212 | self._appsV1 = None |
| 213 | self._podV1 = None |
Alex | dcb792f | 2021-10-04 14:24:21 -0500 | [diff] [blame] | 214 | self._custom = None |
| 215 | |
| 216 | @property |
| 217 | def CustomObjects(self): |
| 218 | if not self._custom: |
| 219 | self._custom = kclient.CustomObjectsApi(self.kApi) |
| 220 | return self._custom |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 221 | |
| 222 | @property |
| 223 | def CoreV1(self): |
Alex | b212954 | 2021-11-23 15:49:42 -0600 | [diff] [blame] | 224 | if self.is_local: |
| 225 | return kclient.CoreV1Api(kclient.ApiClient()) |
| 226 | else: |
| 227 | return kclient.CoreV1Api(kclient.ApiClient(self.kConf)) |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 228 | |
Alex | 1f90e7b | 2021-09-03 15:31:28 -0500 | [diff] [blame] | 229 | @property |
| 230 | def AppsV1(self): |
| 231 | if not self._appsV1: |
| 232 | self._appsV1 = kclient.AppsV1Api(self.kApi) |
| 233 | return self._appsV1 |
| 234 | |
| 235 | @property |
| 236 | def PodsV1(self): |
| 237 | if not self._podsV1: |
| 238 | self._podsV1 = kclient.V1Pod(self.kApi) |
| 239 | return self._podsV1 |
| 240 | |
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 241 | @staticmethod |
| 242 | def _typed_list_to_dict(i_list): |
| 243 | _dict = {} |
| 244 | for _item in i_list: |
| 245 | _d = _item.to_dict() |
| 246 | _type = _d.pop("type") |
| 247 | _dict[_type.lower()] = _d |
| 248 | |
| 249 | return _dict |
| 250 | |
| 251 | @staticmethod |
| 252 | def _get_listed_attrs(items, _path): |
| 253 | _list = [] |
| 254 | for _n in items: |
| 255 | _list.append(utils.rgetattr(_n, _path)) |
| 256 | |
| 257 | return _list |
| 258 | |
Alex | 1f90e7b | 2021-09-03 15:31:28 -0500 | [diff] [blame] | 259 | @staticmethod |
| 260 | def safe_get_item_by_name(api_resource, _name): |
| 261 | for item in api_resource.items: |
| 262 | if item.metadata.name == _name: |
| 263 | return item |
| 264 | |
| 265 | return None |
| 266 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 267 | def wait_for_phase_on_start(self, _func, phase, *args, **kwargs): |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 268 | w = watch.Watch() |
| 269 | start_time = time() |
| 270 | for event in w.stream(_func, *args, **kwargs): |
| 271 | if event["object"].status.phase == phase: |
| 272 | w.stop() |
| 273 | end_time = time() |
| 274 | logger_cli.debug( |
| 275 | "... bacame '{}' in {:0.2f} sec".format( |
| 276 | phase, |
| 277 | end_time-start_time |
| 278 | ) |
| 279 | ) |
| 280 | return |
| 281 | # event.type: ADDED, MODIFIED, DELETED |
| 282 | if event["type"] == "DELETED": |
| 283 | # Pod was deleted while we were waiting for it to start. |
| 284 | logger_cli.debug("... deleted before started") |
| 285 | w.stop() |
| 286 | return |
| 287 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 288 | def wait_for_event(self, _func, event, *args, **kwargs): |
| 289 | w = watch.Watch() |
| 290 | for event in w.stream(_func, *args, **kwargs): |
| 291 | # event.type: ADDED, MODIFIED, DELETED |
| 292 | if event["type"] == event: |
| 293 | # Pod was deleted while we were waiting for it to start. |
| 294 | logger_cli.debug("... got {} event".format(event["type"])) |
| 295 | w.stop() |
| 296 | return |
| 297 | |
    def get_node_info(self, http=False):
        """Query all cluster nodes and return a {name: node_dict} map.

        Addresses/conditions are flattened into dicts keyed by type,
        the 'ready' condition status is coerced to bool, and
        "true"/"false" label values are converted to booleans.

        :param http: use list_node_with_http_info() instead of list_node()
        :raises InvalidReturnException: when the API returns an
                unexpected type
        """
        # Query API for the nodes and do some presorting
        _nodes = {}
        if http:
            # NOTE(review): *_with_http_info returns (data, status, headers);
            # the isinstance check below would then raise — confirm intent
            _raw_nodes = self.CoreV1.list_node_with_http_info()
        else:
            _raw_nodes = self.CoreV1.list_node()

        if not isinstance(_raw_nodes, kclient.models.v1_node_list.V1NodeList):
            raise InvalidReturnException(
                "Invalid return type: '{}'".format(type(_raw_nodes))
            )

        for _n in _raw_nodes.items:
            _name = _n.metadata.name
            _d = _n.to_dict()
            # parse inner data classes as dicts
            _d['addresses'] = self._typed_list_to_dict(_n.status.addresses)
            _d['conditions'] = self._typed_list_to_dict(_n.status.conditions)
            # Update 'status' type: API reports "True"/"False" strings
            if isinstance(_d['conditions']['ready']['status'], str):
                _d['conditions']['ready']['status'] = utils.to_bool(
                    _d['conditions']['ready']['status']
                )
            # Parse image names?
            # TODO: Here is the place where we can parse each node image names

            # Parse roles
            _d['labels'] = {}
            for _label, _data in _d["metadata"]["labels"].items():
                # boolean-looking label values become real booleans
                if _data.lower() in ["true", "false"]:
                    _d['labels'][_label] = utils.to_bool(_data)
                else:
                    _d['labels'][_label] = _data

            # Save
            _nodes[_name] = _d

        # debug report on how many nodes detected
        logger_cli.debug("...node items returned '{}'".format(len(_nodes)))

        return _nodes
| 340 | |
Alex | dcb792f | 2021-10-04 14:24:21 -0500 | [diff] [blame] | 341 | def get_pod_names_by_partial_name(self, partial_name, ns): |
| 342 | logger_cli.debug('... searching for pods with {}'.format(partial_name)) |
| 343 | _pods = self.CoreV1.list_namespaced_pod(ns) |
| 344 | _names = self._get_listed_attrs(_pods.items, "metadata.name") |
| 345 | _pnames = [n for n in _names if partial_name in n] |
| 346 | if len(_pnames) > 1: |
| 347 | logger_cli.debug( |
| 348 | "... more than one pod found for '{}': {}\n".format( |
| 349 | partial_name, |
| 350 | ", ".join(_pnames) |
| 351 | ) |
| 352 | ) |
| 353 | elif len(_pnames) < 1: |
| 354 | logger_cli.warning( |
| 355 | "WARNING: No pods found for '{}'".format(partial_name) |
| 356 | ) |
| 357 | |
| 358 | return _pnames |
| 359 | |
| 360 | def get_pods_by_partial_name(self, partial_name, ns): |
| 361 | logger_cli.debug('... searching for pods with {}'.format(partial_name)) |
| 362 | _all_pods = self.CoreV1.list_namespaced_pod(ns) |
| 363 | # _names = self._get_listed_attrs(_pods.items, "metadata.name") |
| 364 | _pods = [_pod for _pod in _all_pods.items |
| 365 | if partial_name in _pod.metadata.name] |
| 366 | if len(_pods) > 1: |
| 367 | logger_cli.debug( |
| 368 | "... more than one pod found for '{}': {}\n".format( |
| 369 | partial_name, |
| 370 | ", ".join(partial_name) |
| 371 | ) |
| 372 | ) |
| 373 | elif len(_pods) < 1: |
| 374 | logger_cli.warning( |
| 375 | "WARNING: No pods found for '{}'".format(partial_name) |
| 376 | ) |
| 377 | |
| 378 | return _pods |
| 379 | |
    @retry(ApiException, initial_wait=10)
    def exec_on_target_pod(
        self,
        cmd,
        pod_name,
        namespace,
        strict=False,
        _request_timeout=120,
        arguments=None,
        **kwargs
    ):
        """Run `cmd` inside a pod via the exec API and return its stdout.

        :param cmd: command as a list, or a string split on whitespace
        :param pod_name: exact pod name (strict=True) or name prefix
        :param strict: when False, resolve the prefix to the first
                       matching pod in the namespace
        :param arguments: optional single extra argument appended to cmd
        :return: stdout of the command; stderr is substituted when
                 stdout is empty but stderr is not
        :raises KubeException: when no pod matches the prefix
        """
        _pname = ""
        if not strict:
            logger_cli.debug(
                "... searching for pods with the name '{}'".format(pod_name)
            )
            _pods = {}
            _pods = self.CoreV1.list_namespaced_pod(namespace)
            _names = self._get_listed_attrs(_pods.items, "metadata.name")
            _pnames = [n for n in _names if n.startswith(pod_name)]
            if len(_pnames) > 1:
                logger_cli.debug(
                    "... more than one pod found for '{}': {}\n"
                    "... using first one".format(
                        pod_name,
                        ", ".join(_pnames)
                    )
                )
            elif len(_pnames) < 1:
                raise KubeException("No pods found for '{}'".format(pod_name))
            # in case of >1 and =1 we are taking 1st anyway
            _pname = _pnames[0]
        else:
            _pname = pod_name
        logger_cli.debug(
            "... cmd: [CoreV1] exec {} -n {} -- {} '{}'".format(
                _pname,
                namespace,
                cmd,
                arguments
            )
        )
        # Set preload_content to False to preserve JSON
        # If not, output gets converted to str
        # Which causes to change " to '
        # After that json.loads(...) fail
        cmd = cmd if isinstance(cmd, list) else cmd.split()
        if arguments:
            cmd += [arguments]
        # Make sure that CoreV1 is fresh before calling it
        _pod_stream = stream(
            self.CoreV1.connect_get_namespaced_pod_exec,
            _pname,
            namespace,
            command=cmd,
            stderr=True,
            stdin=False,
            stdout=True,
            tty=False,
            _request_timeout=_request_timeout,
            _preload_content=False,
            **kwargs
        )
        # run for timeout
        _pod_stream.run_forever(timeout=_request_timeout)
        # read the output
        _output = _pod_stream.read_stdout()
        _error = _pod_stream.read_stderr()
        if _error:
            # copy error to output
            logger.warning(
                "WARNING: cmd of '{}' returned error:\n{}\n".format(
                    " ".join(cmd),
                    _error
                )
            )
            if not _output:
                _output = _error
        # Send output
        return _output
Alex | 9a4ad21 | 2020-10-01 18:04:25 -0500 | [diff] [blame] | 460 | |
Alex | 1f90e7b | 2021-09-03 15:31:28 -0500 | [diff] [blame] | 461 | def ensure_namespace(self, ns): |
| 462 | """ |
| 463 | Ensure that given namespace exists |
| 464 | """ |
| 465 | # list active namespaces |
| 466 | _v1NamespaceList = self.CoreV1.list_namespace() |
| 467 | _ns = self.safe_get_item_by_name(_v1NamespaceList, ns) |
| 468 | |
| 469 | if _ns is None: |
| 470 | logger_cli.debug("... creating namespace '{}'".format(ns)) |
Alex | dcb792f | 2021-10-04 14:24:21 -0500 | [diff] [blame] | 471 | _new_ns = kclient.V1Namespace() |
| 472 | _new_ns.metadata = kclient.V1ObjectMeta(name=ns) |
| 473 | _r = self.CoreV1.create_namespace(_new_ns) |
Alex | 1f90e7b | 2021-09-03 15:31:28 -0500 | [diff] [blame] | 474 | # TODO: check return on fail |
| 475 | if not _r: |
| 476 | return False |
| 477 | else: |
| 478 | logger_cli.debug("... found existing namespace '{}'".format(ns)) |
| 479 | |
| 480 | return True |
| 481 | |
| 482 | def get_daemon_set_by_name(self, ns, name): |
| 483 | return self.safe_get_item_by_name( |
| 484 | self.AppsV1.list_namespaced_daemon_set(ns), |
| 485 | name |
| 486 | ) |
| 487 | |
    def create_config_map(self, ns, name, source, recreate=True):
        """
        Creates/Overwrites ConfigMap in working namespace

        :param ns: target namespace
        :param name: config map name
        :param source: a single file path, or a directory whose '.py'
                       files (non-hidden) are collected recursively
        :param recreate: NOTE(review): currently unused — existing maps
                         are always replaced; confirm intended semantics
        :return: keys of the data dict (the collected file names)
        """
        # Prepare source
        logger_cli.debug(
            "... preparing config map '{}/{}' with files from '{}'".format(
                ns,
                name,
                source
            )
        )
        _data = {}
        if os.path.isfile(source):
            # populate data with one file
            with open(source, 'rt') as fS:
                _data[os.path.split(source)[1]] = fS.read()
        elif os.path.isdir(source):
            # walk dirs and populate all 'py' files
            for path, dirs, files in os.walk(source):
                _e = ('.py')
                _subfiles = (_fl for _fl in files
                             if _fl.endswith(_e) and not _fl.startswith('.'))
                for _file in _subfiles:
                    with open(os.path.join(path, _file), 'rt') as fS:
                        _data[_file] = fS.read()

        _cm = kclient.V1ConfigMap()
        _cm.metadata = kclient.V1ObjectMeta(name=name, namespace=ns)
        _cm.data = _data
        logger_cli.debug(
            "... prepared config map with {} scripts".format(len(_data))
        )
        # Query existing configmap, delete if needed
        _existing_cm = self.safe_get_item_by_name(
            self.CoreV1.list_namespaced_config_map(namespace=ns),
            name
        )
        if _existing_cm is not None:
            # replace in place, keeping the same name
            self.CoreV1.replace_namespaced_config_map(
                namespace=ns,
                name=name,
                body=_cm
            )
            logger_cli.debug(
                "... replaced existing config map '{}/{}'".format(
                    ns,
                    name
                )
            )
        else:
            # Create it
            self.CoreV1.create_namespaced_config_map(
                namespace=ns,
                body=_cm
            )
            logger_cli.debug("... created config map '{}/{}'".format(
                ns,
                name
            ))

        return _data.keys()
| 550 | |
| 551 | def prepare_daemonset_from_yaml(self, ns, ds_yaml): |
| 552 | _name = ds_yaml['metadata']['name'] |
| 553 | _ds = self.get_daemon_set_by_name(ns, _name) |
| 554 | |
| 555 | if _ds is not None: |
| 556 | logger_cli.debug( |
| 557 | "... found existing daemonset '{}'".format(_name) |
| 558 | ) |
| 559 | _r = self.AppsV1.replace_namespaced_daemon_set( |
| 560 | _ds.metadata.name, |
| 561 | _ds.metadata.namespace, |
| 562 | body=ds_yaml |
| 563 | ) |
| 564 | logger_cli.debug( |
| 565 | "... replacing existing daemonset '{}'".format(_name) |
| 566 | ) |
| 567 | return _r |
| 568 | else: |
| 569 | logger_cli.debug( |
| 570 | "... creating daemonset '{}'".format(_name) |
| 571 | ) |
| 572 | _r = self.AppsV1.create_namespaced_daemon_set(ns, body=ds_yaml) |
| 573 | return _r |
| 574 | |
| 575 | def delete_daemon_set_by_name(self, ns, name): |
| 576 | return self.AppsV1.delete_namespaced_daemon_set(name, ns) |
| 577 | |
| 578 | def exec_on_all_pods(self, pods): |
| 579 | """ |
| 580 | Create multiple threads to execute script on all target pods |
| 581 | """ |
| 582 | # Create map for threads: [[node_name, ns, pod_name]...] |
| 583 | _pod_list = [] |
| 584 | for item in pods.items: |
| 585 | _pod_list.append( |
| 586 | [ |
| 587 | item.spec.nodeName, |
| 588 | item.metadata.namespace, |
| 589 | item.metadata.name |
| 590 | ] |
| 591 | ) |
| 592 | |
| 593 | # map func and cmd |
Alex | dcb792f | 2021-10-04 14:24:21 -0500 | [diff] [blame] | 594 | logger_cli.error("ERROR: 'exec_on_all_pods'is not implemented yet") |
Alex | 1f90e7b | 2021-09-03 15:31:28 -0500 | [diff] [blame] | 595 | # create result list |
| 596 | |
| 597 | return [] |
Alex | 7b0ee9a | 2021-09-21 17:16:17 -0500 | [diff] [blame] | 598 | |
Alex | b212954 | 2021-11-23 15:49:42 -0600 | [diff] [blame] | 599 | @retry(ApiException, initial_wait=5) |
Alex | 7b0ee9a | 2021-09-21 17:16:17 -0500 | [diff] [blame] | 600 | def get_pods_for_daemonset(self, ds): |
| 601 | # get all pod names for daemonset |
| 602 | logger_cli.debug( |
| 603 | "... extracting pod names from daemonset '{}'".format( |
| 604 | ds.metadata.name |
| 605 | ) |
| 606 | ) |
| 607 | _ns = ds.metadata.namespace |
| 608 | _name = ds.metadata.name |
| 609 | _pods = self.CoreV1.list_namespaced_pod( |
| 610 | namespace=_ns, |
| 611 | label_selector='name={}'.format(_name) |
| 612 | ) |
| 613 | return _pods |
| 614 | |
    @retry(ApiException, initial_wait=10)
    def put_string_buffer_to_pod_as_textfile(
        self,
        pod_name,
        namespace,
        buffer,
        filepath,
        _request_timeout=120,
        **kwargs
    ):
        """Write a byte buffer into a file inside a pod.

        Opens an interactive /bin/sh exec session and feeds a heredoc
        (`cat <<'EOF' > filepath ... EOF`) through stdin.

        :param buffer: bytes content to write into the file
        :param filepath: destination path inside the pod
        """
        _command = ['/bin/sh']
        response = stream(
            self.CoreV1.connect_get_namespaced_pod_exec,
            pod_name,
            namespace,
            command=_command,
            stderr=True,
            stdin=True,
            stdout=True,
            tty=False,
            _request_timeout=_request_timeout,
            _preload_content=False,
            **kwargs
        )

        # if json
        # buffer = json.dumps(_dict, indent=2).encode('utf-8')

        # heredoc pieces sent one per poll iteration below
        commands = [
            bytes("cat <<'EOF' >" + filepath + "\n", 'utf-8'),
            buffer,
            bytes("\n" + "EOF\n", 'utf-8')
        ]

        # pump the session: drain stdout/stderr, push the next chunk
        while response.is_open():
            response.update(timeout=1)
            if response.peek_stdout():
                logger_cli.debug("... STDOUT: %s" % response.read_stdout())
            if response.peek_stderr():
                logger_cli.debug("... STDERR: %s" % response.read_stderr())
            if commands:
                c = commands.pop(0)
                logger_cli.debug("... running command... {}".format(c))
                response.write_stdin(str(c, encoding='utf-8'))
            else:
                break
        response.close()

        return
Alex | dcb792f | 2021-10-04 14:24:21 -0500 | [diff] [blame] | 664 | |
| 665 | def get_custom_resource(self, group, version, plural): |
| 666 | # Get it |
| 667 | # Example: |
| 668 | # kubernetes.client.CustomObjectsApi().list_cluster_custom_object( |
| 669 | # group="networking.istio.io", |
| 670 | # version="v1alpha3", |
| 671 | # plural="serviceentries" |
| 672 | # ) |
| 673 | return self.CustomObjects.list_cluster_custom_object( |
| 674 | group=group, |
| 675 | version=version, |
| 676 | plural=plural |
| 677 | ) |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 678 | |
| 679 | def init_pvc_resource( |
| 680 | self, |
| 681 | name, |
| 682 | storage_class, |
| 683 | size, |
| 684 | ns="qa-space", |
| 685 | mode="ReadWriteOnce" |
| 686 | ): |
| 687 | """Return the Kubernetes PVC resource""" |
| 688 | return kclient.V1PersistentVolumeClaim( |
| 689 | api_version='v1', |
| 690 | kind='PersistentVolumeClaim', |
| 691 | metadata=kclient.V1ObjectMeta( |
| 692 | name=name, |
| 693 | namespace=ns, |
| 694 | labels={"name": name} |
| 695 | ), |
| 696 | spec=kclient.V1PersistentVolumeClaimSpec( |
| 697 | storage_class_name=storage_class, |
| 698 | access_modes=[mode], |
| 699 | resources=kclient.V1ResourceRequirements( |
| 700 | requests={'storage': size} |
| 701 | ) |
| 702 | ) |
| 703 | ) |
| 704 | |
    def init_pv_resource(
        self,
        name,
        storage_class,
        size,
        path,
        ns="qa-space",
        mode="ReadWriteOnce"
    ):
        """Return a Kubernetes PV (PersistentVolume) resource backed by
        a host path.

        :param path: host directory backing the volume
        """
        return kclient.V1PersistentVolume(
            api_version='v1',
            kind='PersistentVolume',
            metadata=kclient.V1ObjectMeta(
                name=name,
                namespace=ns,
                labels={"name": name}
            ),
            spec=kclient.V1PersistentVolumeSpec(
                storage_class_name=storage_class,
                access_modes=[mode],
                capacity={'storage': size},
                host_path=kclient.V1HostPathVolumeSource(path=path)
            )
        )
| 730 | |
| 731 | def init_service( |
| 732 | self, |
| 733 | name, |
| 734 | port, |
| 735 | clusterip=None, |
| 736 | ns="qa-space" |
| 737 | ): |
| 738 | """ Inits a V1Service object with data for benchmark agent""" |
| 739 | _meta = kclient.V1ObjectMeta( |
| 740 | name=name, |
| 741 | namespace=ns, |
| 742 | labels={"name": name} |
| 743 | ) |
| 744 | _port = kclient.V1ServicePort( |
| 745 | port=port, |
| 746 | protocol="TCP", |
| 747 | target_port=port |
| 748 | ) |
| 749 | _spec = kclient.V1ServiceSpec( |
| 750 | # cluster_ip=clusterip, |
| 751 | selector={"name": name}, |
| 752 | # type="ClusterIP", |
| 753 | ports=[_port] |
| 754 | ) |
| 755 | return kclient.V1Service( |
| 756 | api_version="v1", |
| 757 | kind="Service", |
| 758 | metadata=_meta, |
| 759 | spec=_spec |
| 760 | ) |
| 761 | |
| 762 | def prepare_pv(self, pv_object): |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 763 | _existing = self.get_pv_by_name(pv_object.metadata.name) |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 764 | if _existing is not None: |
| 765 | self.CoreV1.replace_persistent_volume( |
| 766 | pv_object.metadata.name, |
| 767 | pv_object |
| 768 | ) |
| 769 | else: |
| 770 | self.CoreV1.create_persistent_volume(pv_object) |
| 771 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 772 | return self.wait_for_phase( |
| 773 | "pv", |
| 774 | pv_object.metadata.name, |
| 775 | None, |
| 776 | ["Available", "Bound"] |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 777 | ) |
| 778 | |
| 779 | def prepare_pvc(self, pvc_object): |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 780 | _existing = self.get_pvc_by_name_and_ns( |
| 781 | pvc_object.metadata.name, |
| 782 | pvc_object.metadata.namespace |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 783 | ) |
| 784 | if _existing is not None: |
| 785 | _size_r = pvc_object.spec.resources.requests["storage"] |
| 786 | _size_e = _existing.spec.resources.requests["storage"] |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 787 | logger_cli.info( |
| 788 | "-> Found PVC '{}/{}' with {}. Requested: {}'".format( |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 789 | pvc_object.metadata.namespace, |
| 790 | pvc_object.metadata.name, |
| 791 | _size_e, |
| 792 | _size_r |
| 793 | ) |
| 794 | ) |
| 795 | if _size_r != _size_e: |
| 796 | raise CheckerException( |
| 797 | "ERROR: PVC exists on the cloud with different size " |
| 798 | "than needed. Please cleanup!" |
| 799 | ) |
| 800 | else: |
| 801 | logger_cli.debug( |
| 802 | "... creating pvc '{}'".format(pvc_object.metadata.name) |
| 803 | ) |
| 804 | self.CoreV1.create_namespaced_persistent_volume_claim( |
| 805 | pvc_object.metadata.namespace, |
| 806 | pvc_object |
| 807 | ) |
| 808 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 809 | return self.wait_for_phase( |
| 810 | "pvc", |
| 811 | pvc_object.metadata.name, |
| 812 | pvc_object.metadata.namespace, |
| 813 | ["Available", "Bound"] |
| 814 | ) |
| 815 | |
| 816 | def get_pod_by_name_and_ns(self, name, ns): |
| 817 | return self.safe_get_item_by_name( |
| 818 | self.CoreV1.list_namespaced_pod( |
| 819 | ns, |
| 820 | label_selector='name={}'.format(name) |
| 821 | ), |
| 822 | name |
| 823 | ) |
| 824 | |
Alex | b212954 | 2021-11-23 15:49:42 -0600 | [diff] [blame] | 825 | def list_pods(self, ns, label_str=None): |
| 826 | return self.CoreV1.list_namespaced_pod( |
| 827 | ns, |
| 828 | label_selector=label_str |
| 829 | ) |
| 830 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 831 | def get_svc_by_name_and_ns(self, name, ns): |
| 832 | return self.safe_get_item_by_name( |
| 833 | self.CoreV1.list_namespaced_service( |
| 834 | ns, |
| 835 | label_selector='name={}'.format(name) |
| 836 | ), |
| 837 | name |
| 838 | ) |
| 839 | |
Alex | b212954 | 2021-11-23 15:49:42 -0600 | [diff] [blame] | 840 | def list_svc(self, ns, label_str=None): |
| 841 | return self.CoreV1.list_namespaced_service( |
| 842 | ns, |
| 843 | label_selector=label_str |
| 844 | ) |
| 845 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 846 | def get_pvc_by_name_and_ns(self, name, ns): |
| 847 | return self.safe_get_item_by_name( |
| 848 | self.CoreV1.list_namespaced_persistent_volume_claim( |
| 849 | ns, |
| 850 | label_selector='name={}'.format(name) |
| 851 | ), |
| 852 | name |
| 853 | ) |
| 854 | |
Alex | b212954 | 2021-11-23 15:49:42 -0600 | [diff] [blame] | 855 | def list_pvc(self, ns, label_str=None): |
| 856 | return self.CoreV1.list_namespaced_persistent_volume_claim( |
| 857 | ns, |
| 858 | label_selector=label_str |
| 859 | ) |
| 860 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 861 | def get_pv_by_name(self, name): |
| 862 | return self.safe_get_item_by_name( |
| 863 | self.CoreV1.list_persistent_volume( |
| 864 | label_selector='name={}'.format(name) |
| 865 | ), |
| 866 | name |
| 867 | ) |
| 868 | |
Alex | b212954 | 2021-11-23 15:49:42 -0600 | [diff] [blame] | 869 | def list_pv(self, label_str=None): |
| 870 | return self.CoreV1.list_persistent_volume( |
| 871 | label_selector=label_str |
| 872 | ) |
| 873 | |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 874 | def wait_for_phase(self, ttype, name, ns, phase_list, timeout=120): |
| 875 | logger_cli.debug( |
| 876 | "... waiting '{}'s until {} is '{}'".format( |
| 877 | timeout, |
| 878 | ttype, |
| 879 | ", ".join(phase_list) |
| 880 | ) |
| 881 | ) |
| 882 | while timeout > 0: |
| 883 | if ttype == "pod": |
| 884 | _t = self.get_pod_by_name_and_ns(name, ns) |
| 885 | elif ttype == "svc": |
| 886 | _t = self.get_svc_by_name_and_ns(name, ns) |
| 887 | elif ttype == "pvc": |
| 888 | _t = self.get_pvc_by_name_and_ns(name, ns) |
| 889 | elif ttype == "pv": |
| 890 | _t = self.get_pv_by_name(name) |
| 891 | if "Terminated" in phase_list and not _t: |
| 892 | if ns: |
| 893 | _s = "... {} {}/{} not found".format(ttype, ns, name) |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 894 | else: |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 895 | _s = "... {} '{}' not found".format(ttype, name) |
| 896 | logger_cli.debug(_s) |
| 897 | return None |
| 898 | logger_cli.debug("... {} is '{}'".format(ttype, _t.status.phase)) |
| 899 | if _t.status.phase in phase_list: |
| 900 | return _t |
| 901 | sleep(2) |
| 902 | timeout -= 2 |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 903 | raise CheckerException( |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 904 | "Timed out waiting for {} '{}' in '{}'".format( |
| 905 | ttype, |
| 906 | name, |
| 907 | ", ".join(ttype) |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 908 | ) |
| 909 | ) |
| 910 | |
| 911 | def prepare_pod_from_yaml(self, pod_yaml): |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 912 | _existing = self.get_pod_by_name_and_ns( |
| 913 | pod_yaml['metadata']['name'], |
| 914 | pod_yaml['metadata']['namespace'] |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 915 | ) |
| 916 | if _existing is not None: |
Alex | bfa947c | 2021-11-11 18:14:28 -0600 | [diff] [blame] | 917 | logger_cli.info( |
| 918 | "-> Found pod '{}/{}'. Reusing.".format( |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 919 | pod_yaml['metadata']['namespace'], |
| 920 | pod_yaml['metadata']['name'] |
| 921 | ) |
| 922 | ) |
| 923 | return _existing |
| 924 | else: |
| 925 | self.CoreV1.create_namespaced_pod( |
| 926 | pod_yaml['metadata']['namespace'], |
| 927 | pod_yaml |
| 928 | ) |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 929 | return self.wait_for_phase( |
| 930 | "pod", |
| 931 | pod_yaml['metadata']['name'], |
| 932 | pod_yaml['metadata']['namespace'], |
| 933 | ["Running"] |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 934 | ) |
| 935 | |
| 936 | def expose_pod_port(self, pod_object, port, ns="qa-space"): |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 937 | _existing = self.get_svc_by_name_and_ns( |
| 938 | pod_object.metadata.name, |
| 939 | pod_object.metadata.namespace |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 940 | ) |
| 941 | if _existing is not None: |
| 942 | # TODO: Check port number? |
Alex | 2a7657c | 2021-11-10 20:51:34 -0600 | [diff] [blame] | 943 | logger_cli.info( |
| 944 | "-> Pod already exposed '{}/{}:{}'. Reusing.".format( |
Alex | 5cace3b | 2021-11-10 16:40:37 -0600 | [diff] [blame] | 945 | pod_object.metadata.namespace, |
| 946 | pod_object.metadata.name, |
| 947 | port |
| 948 | ) |
| 949 | ) |
| 950 | return _existing |
| 951 | else: |
| 952 | logger_cli.debug( |
| 953 | "... creating service for pod {}/{}: {}:{}".format( |
| 954 | pod_object.metadata.namespace, |
| 955 | pod_object.metadata.name, |
| 956 | pod_object.status.pod_ip, |
| 957 | port |
| 958 | ) |
| 959 | ) |
| 960 | _svc = self.init_service( |
| 961 | pod_object.metadata.name, |
| 962 | port |
| 963 | ) |
| 964 | return self.CoreV1.create_namespaced_service( |
| 965 | pod_object.metadata.namespace, |
| 966 | _svc |
| 967 | ) |
Alex | 0989ecf | 2022-03-29 13:43:21 -0500 | [diff] [blame] | 968 | |
Alex | 0bcf31b | 2022-03-29 17:38:58 -0500 | [diff] [blame] | 969 | def list_namespaces(self): |
| 970 | return self.CoreV1.list_namespace() |
| 971 | |
| 972 | @retry(ApiException, initial_wait=2) |
| 973 | def get_pod_logs(self, podname, container, ns, tail_lines=50): |
Alex | 0989ecf | 2022-03-29 13:43:21 -0500 | [diff] [blame] | 974 | # Params |
| 975 | # read log of the specified Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True |
| 976 | |
| 977 | # >>> thread = api.read_namespaced_pod_log(name, namespace, |
| 978 | # async_req=True) |
| 979 | # >>> result = thread.get() |
| 980 | # :param async_req bool: execute request asynchronously |
| 981 | # :param str name: name of the Pod (required) |
| 982 | # :param str namespace: object name and auth scope, such as for teams |
| 983 | # and projects (required) |
| 984 | # :param str container: The container for which to stream logs. |
| 985 | # Defaults to only container if there is one container in |
| 986 | # the pod. |
| 987 | # :param bool follow: Follow the log stream of the pod. Defaults to |
| 988 | # false. |
| 989 | # :param bool insecure_skip_tls_verify_backend: |
| 990 | # insecureSkipTLSVerifyBackend indicates that the apiserver |
| 991 | # should not confirm the validity of the serving certificate |
| 992 | # of the backend it is connecting to. This will make the |
| 993 | # HTTPS connection between the apiserver and the backend |
| 994 | # insecure. This means the apiserver cannot verify the log |
| 995 | # data it is receiving came from the real kubelet. If the |
| 996 | # kubelet is configured to verify the apiserver's TLS |
| 997 | # credentials, it does not mean the connection to the real |
| 998 | # kubelet is vulnerable to a man in the middle attack (e.g. |
| 999 | # an attacker could not intercept the actual log data coming |
| 1000 | # from the real kubelet). |
| 1001 | # :param int limit_bytes: If set, the number of bytes to read from the |
| 1002 | # server before terminating the log output. This may not |
| 1003 | # display a complete final line of logging, and may return |
| 1004 | # slightly more or slightly less than the specified limit. |
| 1005 | # :param str pretty: If 'true', then the output is pretty printed. |
| 1006 | # :param bool previous: Return previous terminated container logs. |
| 1007 | # Defaults to false. |
| 1008 | # :param int since_seconds: A relative time in seconds before the |
| 1009 | # current time from which to show logs. If this value precedes |
| 1010 | # the time a pod was started, only logs since the pod start will |
| 1011 | # be returned. If this value is in the future, no logs will be |
| 1012 | # returned. Only one of sinceSeconds or sinceTime may be |
| 1013 | # specified. |
| 1014 | # :param int tail_lines: If set, the number of lines from the end of |
| 1015 | # the logs to show. If not specified, logs are shown from the |
| 1016 | # creation of the container or sinceSeconds or sinceTime |
| 1017 | # :param bool timestamps: If true, add an RFC3339 or RFC3339Nano |
| 1018 | # timestamp at the beginning of every line of log output. |
| 1019 | # Defaults to false. |
| 1020 | # :param _preload_content: if False, the urllib3.HTTPResponse object |
| 1021 | # will be returned without reading/decoding response data. |
| 1022 | # Default is True. |
| 1023 | # :param _request_timeout: timeout setting for this request. If one |
| 1024 | # number provided, it will be total request timeout. It can |
| 1025 | # also be a pair (tuple) of (connection, read) timeouts. |
| 1026 | # :return: str |
| 1027 | # If the method is called asynchronously, returns the request |
| 1028 | # thread. |
| 1029 | |
Alex | 0bcf31b | 2022-03-29 17:38:58 -0500 | [diff] [blame] | 1030 | try: |
| 1031 | return self.CoreV1.read_namespaced_pod_log( |
| 1032 | name=podname, |
| 1033 | namespace=ns, |
| 1034 | container=container, |
| 1035 | timestamps=True, |
| 1036 | tail_lines=tail_lines, |
| 1037 | # pretty=True, |
| 1038 | _request_timeout=(1, 5) |
| 1039 | ) |
| 1040 | except MaxRetryError as e: |
| 1041 | logger_cli.warning( |
| 1042 | "WARNING: Failed to retrieve log {}/{}:{}:\n{}".format( |
| 1043 | ns, |
| 1044 | podname, |
| 1045 | container, |
| 1046 | e.reason |
| 1047 | ) |
| 1048 | ) |
| 1049 | return "" |