Multi env support and Kube client integration
Kube friendly Beta
Package versions supports Kube env
Added:
- Env type detection
- New option: --use-env, for selecting env
when function supports multiple detected envs
- Updated config loading
- Each module and command type has supported env check
and stops execution if it is on unsupported env
- Functions can support multiple envs
- Kubernetes dependency
- Kubernetes API detection: local and remote
- Package checking class hierarchy for using Salt or Kube
- Remote pod execution routine
- Flexible SSH/SSH Forwarder classes: with, ssh.do(), etc
- Multithreaded SSH script execution
- Number of workers parameter, default 5
Fixed:
- Config dependency
- Command loading with supported envs list
- Unittests structure and execution flow updated
- Unittests fixes
- Fixed debug mode handling
- Unified command type/support routine
- Nested attrs getter/setter
Change-Id: I3ade693ac21536e2b5dcee4b24d511749dc72759
Related-PROD: PROD-35811
diff --git a/cfg_checker/common/kube_utils.py b/cfg_checker/common/kube_utils.py
new file mode 100644
index 0000000..3f56f0e
--- /dev/null
+++ b/cfg_checker/common/kube_utils.py
@@ -0,0 +1,282 @@
+"""
+Module to handle interaction with Kube
+"""
+import base64
+import os
+import urllib3
+import yaml
+
+from kubernetes import client as kclient, config as kconfig
+from kubernetes.stream import stream
+
+from cfg_checker.common import logger, logger_cli
+from cfg_checker.common.exception import InvalidReturnException, KubeException
+from cfg_checker.common.file_utils import create_temp_file_with_content
+from cfg_checker.common.other import utils, shell
+from cfg_checker.common.ssh_utils import ssh_shell_p
+
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+
+def _init_kube_conf_local(config):
+    # Init kube library locally: load the default kubeconfig
+    # (~/.kube/config) and return a (kconfig module, ApiClient) pair,
+    # or (None, None) if loading fails.
+    # NOTE(review): 'config' is unused here; presumably kept so the
+    # signature matches _init_kube_conf_remote -- confirm before removing.
+    try:
+        kconfig.load_kube_config()
+        logger_cli.debug(
+            "...found Kube env: core, {}". format(
+                ",".join(
+                    kclient.CoreApi().get_api_versions().versions
+                )
+            )
+        )
+        return kconfig, kclient.ApiClient()
+    except Exception as e:
+        # Broad catch is deliberate: any failure to load a local config
+        # is non-fatal; the caller treats (None, None) as "no local Kube"
+        logger.warn("Failed to init local Kube client: {}".format(
+            str(e)
+            )
+        )
+        return None, None
+
+
+def _init_kube_conf_remote(config):
+ # init remote client
+ # Preload Kube token
+ """
+ APISERVER=$(kubectl config view --minify |
+ grep server | cut -f 2- -d ":" | tr -d " ")
+ SECRET_NAME=$(kubectl get secrets |
+ grep ^default | cut -f1 -d ' ')
+ TOKEN=$(kubectl describe secret $SECRET_NAME |
+ grep -E '^token' | cut -f2 -d':' | tr -d " ")
+
+ echo "Detected API Server at: '${APISERVER}'"
+ echo "Got secret: '${SECRET_NAME}'"
+ echo "Loaded token: '${TOKEN}'"
+
+ curl $APISERVER/api
+ --header "Authorization: Bearer $TOKEN" --insecure
+ """
+ import yaml
+
+ _c_data = ssh_shell_p(
+ "sudo cat " + config.kube_config_path,
+ config.ssh_host,
+ username=config.ssh_user,
+ keypath=config.ssh_key,
+ piped=False,
+ use_sudo=config.ssh_uses_sudo,
+ )
+
+ _conf = yaml.load(_c_data, Loader=yaml.SafeLoader)
+
+ _kube_conf = kclient.Configuration()
+ # A remote host configuration
+
+ # To work with remote cluster, we need to extract these
+ # keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl']
+ # When v12 of the client will be release, we will use load_from_dict
+
+ _kube_conf.ssl_ca_cert = create_temp_file_with_content(
+ base64.standard_b64decode(
+ _conf['clusters'][0]['cluster']['certificate-authority-data']
+ )
+ )
+ _host = _conf['clusters'][0]['cluster']['server']
+ _kube_conf.cert_file = create_temp_file_with_content(
+ base64.standard_b64decode(
+ _conf['users'][0]['user']['client-certificate-data']
+ )
+ )
+ _kube_conf.key_file = create_temp_file_with_content(
+ base64.standard_b64decode(
+ _conf['users'][0]['user']['client-key-data']
+ )
+ )
+ if "http" not in _host or "443" not in _host:
+ logger_cli.error(
+ "Failed to extract Kube host: '{}'".format(_host)
+ )
+ else:
+ logger_cli.debug(
+ "...'context' host extracted: '{}' via SSH@{}".format(
+ _host,
+ config.ssh_host
+ )
+ )
+
+ # Substitute context host to ours
+ _tmp = _host.split(':')
+ _kube_conf.host = \
+ _tmp[0] + "://" + config.mcp_host + ":" + _tmp[2]
+ config.kube_port = _tmp[2]
+ logger_cli.debug(
+ "...kube remote host updated to {}".format(
+ _kube_conf.host
+ )
+ )
+ _kube_conf.verify_ssl = False
+ _kube_conf.debug = config.debug
+ # Nevertheless if you want to do it
+ # you can with these 2 parameters
+ # configuration.verify_ssl=True
+ # ssl_ca_cert is the filepath
+ # to the file that contains the certificate.
+ # configuration.ssl_ca_cert="certificate"
+
+ # _kube_conf.api_key = {
+ # "authorization": "Bearer " + config.kube_token
+ # }
+
+ # Create a ApiClient with our config
+ _kube_api = kclient.ApiClient(_kube_conf)
+
+ return _kube_conf, _kube_api
+
+
+class KubeApi(object):
+    # Base wrapper around the Kubernetes client: holds the loaded
+    # configuration (kConf) and the low-level ApiClient (kApi), picking
+    # local vs remote initialization based on config.env_name.
+    def __init__(self, config):
+        self.config = config
+        self._init_kclient()
+        # Reserved for callers to keep the latest raw API response
+        self.last_response = None
+
+    def _init_kclient(self):
+        # if there is no password - try to get local, if this available
+        logger_cli.debug("# Initializong Kube config...")
+        if self.config.env_name == "local":
+            self.kConf, self.kApi = _init_kube_conf_local(self.config)
+            self.is_local = True
+            # Load local config data
+            if os.path.exists(self.config.kube_config_path):
+                # Read kubeconfig as root and keep the decoded client key
+                # in a temp file plus the raw yaml for later use
+                _c_data = shell("sudo cat " + self.config.kube_config_path)
+                _conf = yaml.load(_c_data, Loader=yaml.SafeLoader)
+                self.user_keypath = create_temp_file_with_content(
+                    base64.standard_b64decode(
+                        _conf['users'][0]['user']['client-key-data']
+                    )
+                )
+                self.yaml_conf = _c_data
+        else:
+            self.kConf, self.kApi = _init_kube_conf_remote(self.config)
+            self.is_local = False
+
+    def get_versions_api(self):
+        # client.CoreApi().get_api_versions().versions
+        # Returns a VersionApi bound to our ApiClient
+        return kclient.VersionApi(self.kApi)
+
+
+class KubeRemote(KubeApi):
+ def __init__(self, config):
+ super(KubeRemote, self).__init__(config)
+ self._coreV1 = None
+
+ @property
+ def CoreV1(self):
+ if not self._coreV1:
+ self._coreV1 = kclient.CoreV1Api(self.kApi)
+ return self._coreV1
+
+ @staticmethod
+ def _typed_list_to_dict(i_list):
+ _dict = {}
+ for _item in i_list:
+ _d = _item.to_dict()
+ _type = _d.pop("type")
+ _dict[_type.lower()] = _d
+
+ return _dict
+
+ @staticmethod
+ def _get_listed_attrs(items, _path):
+ _list = []
+ for _n in items:
+ _list.append(utils.rgetattr(_n, _path))
+
+ return _list
+
+ def get_node_info(self, http=False):
+ # Query API for the nodes and do some presorting
+ _nodes = {}
+ if http:
+ _raw_nodes = self.CoreV1.list_node_with_http_info()
+ else:
+ _raw_nodes = self.CoreV1.list_node()
+
+ if not isinstance(_raw_nodes, kclient.models.v1_node_list.V1NodeList):
+ raise InvalidReturnException(
+ "Invalid return type: '{}'".format(type(_raw_nodes))
+ )
+
+ for _n in _raw_nodes.items:
+ _name = _n.metadata.name
+ _d = _n.to_dict()
+ # parse inner data classes as dicts
+ _d['addresses'] = self._typed_list_to_dict(_n.status.addresses)
+ _d['conditions'] = self._typed_list_to_dict(_n.status.conditions)
+ # Update 'status' type
+ if isinstance(_d['conditions']['ready']['status'], str):
+ _d['conditions']['ready']['status'] = utils.to_bool(
+ _d['conditions']['ready']['status']
+ )
+ # Parse image names?
+ # TODO: Here is the place where we can parse each node image names
+
+ # Parse roles
+ _d['labels'] = {}
+ for _label, _data in _d["metadata"]["labels"].items():
+ if _data.lower() in ["true", "false"]:
+ _d['labels'][_label] = utils.to_bool(_data)
+ else:
+ _d['labels'][_label] = _data
+
+ # Save
+ _nodes[_name] = _d
+
+ # debug report on how many nodes detected
+ logger_cli.debug("...node items returned '{}'".format(len(_nodes)))
+
+ return _nodes
+
+ def exec_on_target_pod(
+ self,
+ cmd,
+ pod_name,
+ namespace,
+ strict=False,
+ _request_timeout=120,
+ **kwargs
+ ):
+ _pods = {}
+ _pods = self._coreV1.list_namespaced_pod(namespace)
+ _names = self._get_listed_attrs(_pods.items, "metadata.name")
+
+ _pname = ""
+ if not strict:
+ _pname = [n for n in _names if n.startswith(pod_name)]
+ if len(_pname) > 1:
+ logger_cli.debug(
+ "...more than one pod found for '{}': {}\n"
+ "...using first one".format(
+ pod_name,
+ ", ".join(_pname)
+ )
+ )
+ _pname = _pname[0]
+ elif len(_pname) < 1:
+ raise KubeException("No pods found for '{}'".format(pod_name))
+ else:
+ _pname = pod_name
+
+ _r = stream(
+ self.CoreV1.connect_get_namespaced_pod_exec,
+ _pname,
+ namespace,
+ command=cmd.split(),
+ stderr=True,
+ stdin=False,
+ stdout=True,
+ tty=False,
+ _request_timeout=_request_timeout,
+ **kwargs
+ )
+
+ return _r