Refactoring of k8s manager and tests
Changes:
- Official kubernetes python lib
- Rewrite k8s api wrapper in OOP manner
- Use api where its possible instead of cli
- Remove virtlet code because it can be replaced with the pod API
- Remove unused/outdated manager code
- Remove bug workaround in k8s upgrade template
- Remove netchecker obsolete code
- Remove unfinished test_rbd_flexvolume_driver
Change-Id: I446a240123282196a6ba54f588aea84791f175ba
Related-PROD: PROD-21700
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
index 24ab271..dc58d9c 100644
--- a/tcp_tests/helpers/netchecker.py
+++ b/tcp_tests/helpers/netchecker.py
@@ -12,11 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import requests
from devops.helpers import helpers
-from k8sclient.client import rest
from tcp_tests import logger
from tcp_tests.helpers import utils
@@ -24,570 +22,195 @@
LOG = logger.logger
+NETCHECKER_NAMESPACE = "netchecker"
+NETCHECKER_SERVICE_PREFIX = "netchecker"
+NETCHECKER_SERVER_PREFIX = "netchecker-server-"
+NETCHECKER_AGENT_PREFIX = "netchecker-agent-"
-NETCHECKER_SERVICE_NAME = "netchecker-service"
-NETCHECKER_CONTAINER_PORT = NETCHECKER_SERVICE_PORT = 8081
-NETCHECKER_NODE_PORT = 31081
-NETCHECKER_REPORT_INTERVAL = 30
-NETCHECKER_SERVER_REPLICAS = 1
-NETCHECKER_PROBEURLS = "http://ipinfo.io"
-NETCHECKER_SVC_CFG = {
- "apiVersion": "v1",
- "kind": "Service",
- "metadata": {
- "name": NETCHECKER_SERVICE_NAME
- },
- "spec": {
- "ports": [
- {
- "nodePort": NETCHECKER_NODE_PORT,
- "port": NETCHECKER_SERVICE_PORT,
- "protocol": "TCP",
- "targetPort": NETCHECKER_CONTAINER_PORT
+class Netchecker(object):
+ def __init__(self, k8sapi, namespace=NETCHECKER_NAMESPACE):
+ self._api = k8sapi
+ self._namespace = namespace
+
+ def get_netchecker_pod_ip(self, prefix=NETCHECKER_SERVER_PREFIX):
+ pods = self._api.pods.list(self._namespace, name_prefix=prefix)
+ assert len(pods) > 0, "No '{}' pods found!".format(prefix)
+ return pods[0].read().status.host_ip
+
+ def get_netchecker_service(self, prefix=NETCHECKER_SERVICE_PREFIX):
+ services = self._api.services.list(self._namespace, name_prefix=prefix)
+ assert len(services) > 0, "No '{}' services found!".format(prefix)
+ return services[0]
+
+ @utils.retry(3, requests.exceptions.RequestException)
+ def get_connectivity_status(self):
+ kube_host_ip = self.get_netchecker_pod_ip()
+
+ net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
+ kube_host_ip, self.get_service_port())
+
+ response = requests.get(net_status_url, timeout=5)
+ LOG.debug('Connectivity check status: [{0}] {1}'.format(
+ response.status_code, response.text.strip()))
+ return response
+
+ @utils.retry(3, requests.exceptions.RequestException)
+ def wait_netchecker_pods_running(self, prefix):
+ for pod in self._api.pods.list(self._namespace, name_prefix=prefix):
+ pod.wait_running(timeout=600)
+
+ def check_network(self, works):
+ if works:
+ assert self.get_connectivity_status().status_code in (200, 204)
+ else:
+ assert self.get_connectivity_status().status_code == 400
+
+ def wait_check_network(self, works, timeout=60, interval=10):
+ helpers.wait_pass(
+ lambda: self.check_network(works=works),
+ timeout=timeout,
+ interval=interval)
+
+ def kubernetes_block_traffic_namespace(self,
+ namespace=NETCHECKER_NAMESPACE):
+ self._api.namespaces.get(name=namespace).patch({
+ "metadata": {
+ "annotations": {
+ "net.beta.kubernetes.io/network-policy":
+ '{"ingress": {"isolation": "DefaultDeny"}}',
+ }
}
- ],
- "selector": {
- "app": "netchecker-server"
- },
- "type": "NodePort"
- }
-}
+ })
-NETCHECKER_DEPLOYMENT_CFG = {
- "kind": "Deployment",
- "spec": {
- "template": {
+ def calico_allow_netchecker_connections(self):
+ srv_pod_ip = self.get_netchecker_pod_ip()
+
+ body = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker",
+ "namespace": self._namespace,
+ },
"spec": {
- "containers": [
+ "ingress": [{
+ "from": [{
+ "ipBlock": {
+ "cidr": srv_pod_ip + "/24"
+ }
+ }]
+ }],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
+ }
+ }
+ }
+
+ self._api.networkpolicies.create(namespace=self._namespace, body=body)
+
+ def kubernetes_allow_traffic_from_agents(self):
+ self._api.namespaces.get('default').patch({
+ "metadata": {
+ "labels": {
+ "name": 'default',
+ "net.beta.kubernetes.io/network-policy": None,
+ }
+ }
+ })
+
+ kubernetes_policy = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker-agent",
+ "namespace": self._namespace,
+ },
+ "spec": {
+ "ingress": [
{
- "name": "netchecker-server",
- "env": None,
- "imagePullPolicy": "IfNotPresent",
- "image": "mirantis/k8s-netchecker-server:latest",
- "args": [
- "-v=5",
- "-logtostderr",
- "-kubeproxyinit",
- "-endpoint=0.0.0.0:{0}".format(
- NETCHECKER_CONTAINER_PORT)
- ],
- "ports": [
+ "from": [
{
- "containerPort": NETCHECKER_CONTAINER_PORT,
- "hostPort": NETCHECKER_NODE_PORT
+ "namespaceSelector": {
+ "matchLabels": {
+ "name": self._namespace
+ }
+ }
+ },
+ {
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-agent"
+ }
+ }
}
]
}
- ]
- },
- "metadata": {
- "labels": {
- "app": "netchecker-server"
- },
- "name": "netchecker-server"
- }
- },
- "replicas": NETCHECKER_SERVER_REPLICAS
- },
- "apiVersion": "extensions/v1beta1",
- "metadata": {
- "name": "netchecker-server"
- }
-}
-
-NETCHECKER_DS_CFG = [
- {
- "apiVersion": "extensions/v1beta1",
- "kind": "DaemonSet",
- "metadata": {
- "labels": {
- "app": "netchecker-agent"
- },
- "name": "netchecker-agent"
- },
- "spec": {
- "template": {
- "metadata": {
- "labels": {
- "app": "netchecker-agent"
- },
- "name": "netchecker-agent"
- },
- "spec": {
- "tolerations": [
- {
- "key": "node-role.kubernetes.io/master",
- "effect": "NoSchedule"
- }
- ],
- "containers": [
- {
- "env": [
- {
- "name": "MY_POD_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "metadata.name"
- }
- }
- },
- {
- "name": "MY_NODE_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "spec.nodeName"
- }
- }
- },
- {
- "name": "REPORT_INTERVAL",
- "value": str(NETCHECKER_REPORT_INTERVAL)
- },
- ],
- "image": "mirantis/k8s-netchecker-agent:latest",
- "imagePullPolicy": "IfNotPresent",
- "name": "netchecker-agent",
- "command": ["netchecker-agent"],
- "args": [
- "-v=5",
- "-logtostderr",
- "-probeurls={0}".format(NETCHECKER_PROBEURLS)
- ]
- }
- ],
- }
- },
- "updateStrategy": {
- "type": "RollingUpdate"
- }
- }
- },
- {
- "apiVersion": "extensions/v1beta1",
- "kind": "DaemonSet",
- "metadata": {
- "labels": {
- "app": "netchecker-agent-hostnet"
- },
- "name": "netchecker-agent-hostnet"
- },
- "spec": {
- "template": {
- "metadata": {
- "labels": {
- "app": "netchecker-agent-hostnet"
- },
- "name": "netchecker-agent-hostnet"
- },
- "spec": {
- "tolerations": [
- {
- "key": "node-role.kubernetes.io/master",
- "effect": "NoSchedule"
- }
- ],
- "containers": [
- {
- "env": [
- {
- "name": "MY_POD_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "metadata.name"
- }
- }
- },
- {
- "name": "MY_NODE_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "spec.nodeName"
- }
- }
- },
- {
- "name": "REPORT_INTERVAL",
- "value": str(NETCHECKER_REPORT_INTERVAL)
- },
- ],
- "image": "mirantis/k8s-netchecker-agent:latest",
- "imagePullPolicy": "IfNotPresent",
- "name": "netchecker-agent",
- "command": ["netchecker-agent"],
- "args": [
- "-v=5",
- "-logtostderr",
- "-probeurls={0}".format(NETCHECKER_PROBEURLS)
- ]
- }
- ],
- "hostNetwork": True,
- "dnsPolicy": "ClusterFirstWithHostNet",
- "updateStrategy": {
- "type": "RollingUpdate"
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
}
}
+ }
+ }
+
+ kubernetes_policy_hostnet = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker-agent-hostnet",
+ "namespace": self._namespace,
},
- "updateStrategy": {
- "type": "RollingUpdate"
- }
- }
- }
-]
-
-NETCHECKER_BLOCK_POLICY = {
- "kind": "policy",
- "spec": {
- "ingress": [
- {
- "action": "allow"
- },
- {
- "action": "deny",
- "destination": {
- "ports": [
- NETCHECKER_SERVICE_PORT
- ]
- },
- "protocol": "tcp"
- }
- ]
- },
- "apiVersion": "v1",
- "metadata": {
- "name": "deny-netchecker"
- }
-}
-
-
-def start_server(k8s, config, namespace=None,
- deploy_spec=NETCHECKER_DEPLOYMENT_CFG,
- svc_spec=NETCHECKER_SVC_CFG):
- """Start netchecker server in k8s cluster
-
- :param k8s: K8SManager
- :param config: fixture provides oslo.config
- :param namespace: str
- :param deploy_spec: dict
- :param svc_spec: dict
- :return: None
- """
- for container in deploy_spec['spec']['template']['spec']['containers']:
- if container['name'] == 'netchecker-server':
- container['image'] = \
- config.k8s_deploy.kubernetes_netchecker_server_image
- try:
- if k8s.api.deployments.get(name=deploy_spec['metadata']['name'],
- namespace=namespace):
- LOG.debug('Network checker server deployment "{}" '
- 'already exists! Skipping resource '
- 'creation'.format(deploy_spec['metadata']['name']))
- except rest.ApiException as e:
- if e.status == 404:
- n = k8s.check_deploy_create(body=deploy_spec, namespace=namespace)
- k8s.wait_deploy_ready(n.name, namespace=namespace)
- else:
- raise e
- try:
- if k8s.api.services.get(name=svc_spec['metadata']['name']):
- LOG.debug('Network checker server service {} is '
- 'already running! Skipping resource creation'
- '.'.format(svc_spec['metadata']['name']))
- except rest.ApiException as e:
- if e.status == 404:
- k8s.check_service_create(body=svc_spec, namespace=namespace)
- else:
- raise e
-
-
-def start_agent(k8s, config, namespace=None, ds_spec=NETCHECKER_DS_CFG,
- service_namespace=None):
- """Start netchecker agent in k8s cluster
-
- :param k8s: K8SManager
- :param config: fixture provides oslo.config
- :param namespace: str
- :param ds_spec: str
- :return: None
- """
- for ds in ds_spec:
- for container in ds['spec']['template']['spec']['containers']:
- if container['name'] == 'netchecker-agent':
- container['image'] = \
- config.k8s_deploy.kubernetes_netchecker_agent_image
- if service_namespace is not None:
- container['args'].append(
- "-serverendpoint={0}.{1}.svc.cluster.local:{2}".format(
- NETCHECKER_SERVICE_NAME,
- service_namespace,
- NETCHECKER_SERVICE_PORT))
- k8s.check_ds_create(body=ds, namespace=namespace)
- k8s.wait_ds_ready(dsname=ds['metadata']['name'], namespace=namespace)
- k8s.wait_pods_phase(pods=[pod for pod in k8s.api.pods.list()
- if 'netchecker-agent' in pod.name],
- phase='Running',
- timeout=600)
-
-
-@utils.retry(3, requests.exceptions.RequestException)
-def get_connectivity_status(k8sclient,
- netchecker_pod_port=NETCHECKER_NODE_PORT,
- pod_name='netchecker-server', namespace='default'):
-
- netchecker_srv_pod_names = [pod.name for pod in
- k8sclient.pods.list(namespace=namespace)
- if pod_name in pod.name]
-
- assert len(netchecker_srv_pod_names) > 0, \
- "No netchecker-server pods found!"
-
- netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
- namespace=namespace)
- kube_host_ip = netchecker_srv_pod.status.host_ip
- net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
- kube_host_ip, netchecker_pod_port)
- response = requests.get(net_status_url, timeout=5)
- LOG.debug('Connectivity check status: [{0}] {1}'.format(
- response.status_code, response.text.strip()))
- return response
-
-
-@utils.retry(3, requests.exceptions.RequestException)
-def get_netchecker_pod_status(k8s,
- pod_name='netchecker-server',
- namespace='default'):
-
- k8s.wait_pods_phase(
- pods=[pod for pod in k8s.api.pods.list(namespace=namespace)
- if pod_name in pod.name], phase='Running', timeout=600)
-
-
-def check_network(k8sclient, netchecker_pod_port,
- namespace='default', works=True):
- if works:
- assert get_connectivity_status(
- k8sclient, namespace=namespace,
- netchecker_pod_port=netchecker_pod_port).status_code in (200, 204)
- else:
- assert get_connectivity_status(
- k8sclient, namespace=namespace,
- netchecker_pod_port=netchecker_pod_port).status_code == 400
-
-
-def wait_check_network(k8sclient, namespace='default', works=True, timeout=300,
- interval=10, netchecker_pod_port=NETCHECKER_NODE_PORT):
- helpers.wait_pass(
- lambda: check_network(
- k8sclient, netchecker_pod_port=netchecker_pod_port,
- namespace=namespace,
- works=works),
- timeout=timeout,
- interval=interval)
-
-
-def calico_block_traffic_on_node(underlay, target_node):
- cmd = "echo '{0}' | calicoctl create -f -".format(NETCHECKER_BLOCK_POLICY)
- underlay.sudo_check_call(cmd, node_name=target_node)
- LOG.info('Blocked traffic to the network checker service from '
- 'containers on node "{}".'.format(target_node))
-
-
-def calico_unblock_traffic_on_node(underlay, target_node):
- cmd = "echo '{0}' | calicoctl delete -f -".format(NETCHECKER_BLOCK_POLICY)
-
- underlay.sudo_check_call(cmd, node_name=target_node)
- LOG.info('Unblocked traffic to the network checker service from '
- 'containers on node "{}".'.format(target_node))
-
-
-def calico_get_version(underlay, target_node):
- raw_version = underlay.sudo_check_call('calicoctl version',
- node_name=target_node)
-
- assert raw_version['exit_code'] == 0 and len(raw_version['stdout']) > 0, \
- "Unable to get calico version!"
-
- if len(raw_version['stdout']) > 1:
- ctl_version = raw_version['stdout'][0].split()[1].strip()
- else:
- ctl_version = raw_version['stdout'][0].strip()
-
- LOG.debug("Calico (calicoctl) version on '{0}': '{1}'".format(target_node,
- ctl_version))
- return ctl_version
-
-
-def kubernetes_block_traffic_namespace(underlay, kube_host_ip, namespace):
- # TODO(apanchenko): do annotation using kubernetes API
- cmd = ('kubectl annotate ns {0} \'net.beta.kubernetes.io/'
- 'network-policy={{"ingress": {{"isolation":'
- ' "DefaultDeny"}}}}\'').format(namespace)
- underlay.sudo_check_call(cmd=cmd, host=kube_host_ip)
-
-
-def calico_allow_netchecker_connections(underlay, k8sclient, kube_host_ip,
- namespace):
- netchecker_srv_pod_names = [pod.name for pod in
- k8sclient.pods.list(namespace=namespace)
- if 'netchecker-server' in pod.name]
-
- assert len(netchecker_srv_pod_names) > 0, \
- "No netchecker-server pods found!"
-
- netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
- namespace=namespace)
- nc_host_ip = netchecker_srv_pod.status.host_ip
-
- kubernetes_policy = {
- "apiVersion": "extensions/v1beta1",
- "kind": "NetworkPolicy",
- "metadata": {
- "name": "access-netchecker",
- "namespace": namespace,
- },
- "spec": {
- "ingress": [
- {
- "from": [
- {
- "ipBlock": {
- "cidr": nc_host_ip + "/24"
+ "spec": {
+ "ingress": [
+ {
+ "from": [
+ {
+ "namespaceSelector": {
+ "matchLabels": {
+ "name": self._namespace
+ }
+ }
+ },
+ {
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-agent-hostnet"
+ }
+ }
}
- }
- ]
- }
- ],
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-server"
+ ]
+ }
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
}
}
}
- }
- cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
- json.dumps(kubernetes_policy))
- underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
+ self._api.networkpolicies.create(
+ namespace=self._namespace, body=kubernetes_policy)
+ self._api.networkpolicies.create(
+ namespace=self._namespace, body=kubernetes_policy_hostnet)
+ @utils.retry(3, requests.exceptions.RequestException)
+ def get_metric(self):
+ kube_host_ip = self.get_netchecker_pod_ip()
-def kubernetes_allow_traffic_from_agents(underlay, kube_host_ip, namespace):
- # TODO(apanchenko): add network policies using kubernetes API
- label_namespace_cmd = "kubectl label namespace default name=default"
- underlay.sudo_check_call(cmd=label_namespace_cmd, host=kube_host_ip)
- kubernetes_policy = {
- "apiVersion": "extensions/v1beta1",
- "kind": "NetworkPolicy",
- "metadata": {
- "name": "access-netchecker-agent",
- "namespace": namespace,
- },
- "spec": {
- "ingress": [
- {
- "from": [
- {
- "namespaceSelector": {
- "matchLabels": {
- "name": namespace
- }
- }
- },
- {
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-agent"
- }
- }
- }
- ]
- }
- ],
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-server"
- }
- }
- }
- }
+ metrics_url = 'http://{0}:{1}/metrics'.format(
+ kube_host_ip, self.get_service_port())
- kubernetes_policy_hostnet = {
- "apiVersion": "extensions/v1beta1",
- "kind": "NetworkPolicy",
- "metadata": {
- "name": "access-netchecker-agent-hostnet",
- "namespace": namespace,
- },
- "spec": {
- "ingress": [
- {
- "from": [
- {
- "namespaceSelector": {
- "matchLabels": {
- "name": namespace
- }
- }
- },
- {
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-agent-hostnet"
- }
- }
- }
- ]
- }
- ],
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-server"
- }
- }
- }
- }
+ response = requests.get(metrics_url, timeout=30)
+ LOG.debug('Metrics: [{0}] {1}'.format(
+ response.status_code, response.text.strip()))
+ return response
- cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
- json.dumps(kubernetes_policy))
- underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
-
- cmd_add_policy_hostnet = "echo '{0}' | kubectl create -f -".format(
- json.dumps(kubernetes_policy_hostnet))
- underlay.sudo_check_call(cmd=cmd_add_policy_hostnet, host=kube_host_ip)
-
-
-@utils.retry(3, requests.exceptions.RequestException)
-def get_metric(k8sclient, netchecker_pod_port,
- pod_name='netchecker-server', namespace='default'):
-
- netchecker_srv_pod_names = [pod.name for pod in
- k8sclient.pods.list(namespace=namespace)
- if pod_name in pod.name]
-
- assert len(netchecker_srv_pod_names) > 0, \
- "No netchecker-server pods found!"
- netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
- namespace=namespace)
-
- kube_host_ip = netchecker_srv_pod.status.host_ip
- metrics_url = 'http://{0}:{1}/metrics'.format(
- kube_host_ip, netchecker_pod_port)
- response = requests.get(metrics_url, timeout=30)
- LOG.debug('Metrics: [{0}] {1}'.format(
- response.status_code, response.text.strip()))
- return response
-
-
-def get_service_port(k8sclient, service_name='netchecker',
- namespace='netchecker'):
- full_service_name = [service.name for service
- in k8sclient.services.list(namespace=namespace)
- if service_name in service.name]
- assert len(full_service_name) > 0, "No netchecker service run"
-
- service_details = k8sclient.services.get(name=full_service_name[0],
- namespace=namespace)
-
- LOG.debug('Necthcecker service details {0}'.format(service_details))
- netchecker_port = service_details.spec.ports[0].node_port
- return netchecker_port
+ def get_service_port(self):
+ service_details = self.get_netchecker_service()
+ LOG.debug('Netchecker service details {0}'.format(service_details))
+ return service_details.read().spec.ports[0].node_port
diff --git a/tcp_tests/managers/k8s/__init__.py b/tcp_tests/managers/k8s/__init__.py
index bd76aa7..9a5c573 100644
--- a/tcp_tests/managers/k8s/__init__.py
+++ b/tcp_tests/managers/k8s/__init__.py
@@ -11,10 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-import urllib3
-
from tcp_tests.managers.k8s.cluster import K8sCluster
+from tcp_tests.managers.k8s.base import read_yaml_file, read_yaml_str
-__all__ = ['K8sCluster']
-
-urllib3.disable_warnings() # Supress https insecure warning
+__all__ = ['K8sCluster', 'read_yaml_file', 'read_yaml_str', 'read_yaml_url']
diff --git a/tcp_tests/managers/k8s/base.py b/tcp_tests/managers/k8s/base.py
index 7adb41a..6ca6a88 100644
--- a/tcp_tests/managers/k8s/base.py
+++ b/tcp_tests/managers/k8s/base.py
@@ -12,97 +12,140 @@
# License for the specific language governing permissions and limitations
-class K8sBaseResource(object):
- """docstring for K8sBaseResource"""
+import yaml
+import requests
+import os
- def __init__(self, manager, data):
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class K8sBaseResource(object):
+ resource_type = None
+
+ def __init__(self, manager, name=None, namespace=None, data=None):
self._manager = manager
- self._add_details(data)
+ self._name = name
+ self._namespace = namespace
+ self._read_cache = None
+ if data is not None:
+ self._update_cache(data)
def __repr__(self):
- reprkeys = sorted(k
- for k in self.__dict__.keys()
- if k[0] != '_' and
- k not in ['manager'])
- info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
- return "<%s %s>" % (self.__class__.__name__, info)
+ uid = 'unknown-uid'
+ if self._read_cache is not None:
+ uid = self.uid
+ return "<{0} name='{1}' namespace='{2}' uuid='{3}'>".format(
+ self.__class__.__name__, self.name, self.namespace, uid)
@property
- def api_version(self):
- return self._data.api_version
-
- def _add_details(self, data):
- self._data = data
- for k in [k for k in dir(data)
- if not any((k.startswith('_'), k in ('to_dict', 'to_str')))]:
- try:
- setattr(self, k, getattr(data, k))
- except AttributeError:
- # In this case we already defined the attribute on the class
- pass
-
- def __eq__(self, other):
- if not isinstance(other, K8sBaseResource):
- return NotImplemented
- # two resources of different types are not equal
- if not isinstance(other, self.__class__):
- return False
- return self._info == other._info
-
-
-class K8sBaseManager(object):
-
- resource_class = None
-
- def __init__(self, api, namespace):
- self._api = api
- self._namespace = namespace
- self._raw = None
-
- @property
- def api(self):
- return self._api
+ def name(self):
+ return self._name
@property
def namespace(self):
return self._namespace
- def get(self, *args, **kwargs):
- if not hasattr(self, '_get'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_get'))
+ @property
+ def uid(self):
+ return self.read(cached=True).metadata.uid
- return self.resource_class(self, self._get(*args, **kwargs))
+ def _update_cache(self, data):
+ self._read_cache = data
+ self._namespace = data.metadata.namespace
+ self._name = data.metadata.name
- def list(self, *args, **kwargs):
- if not hasattr(self, '_list'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_list'))
+ def read(self, cached=False, **kwargs):
+ if not cached:
+ self._update_cache(self._read(**kwargs))
+ return self._read_cache
- lst = self._list(*args, **kwargs)
+ def create(self, body, **kwargs):
+ LOG.info("K8S API Creating {0} with body:\n{1}".format(
+ self.resource_type, body))
- return [self.resource_class(self, item) for item in lst.items]
+ self._update_cache(self._create(body, **kwargs))
+ return self
- def create(self, *args, **kwargs):
- if not hasattr(self, '_create'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_create'))
- return self.resource_class(self, self._create(*args, **kwargs))
+ def patch(self, body, **kwargs):
- def replace(self, *args, **kwargs):
- if not hasattr(self, '_replace'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_replace'))
- return self._replace(*args, **kwargs)
+ LOG.info("K8S API Patching {0} name={1} ns={2} with body:\n{3}".format(
+ self.resource_type, self.name, self.namespace, body))
- def delete(self, *args, **kwargs):
- if not hasattr(self, '_delete'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_delete'))
- return self._delete(*args, **kwargs)
+ self._update_cache(self._patch(body, **kwargs))
+ return self
- def deletecollection(self, *args, **kwargs):
- if not hasattr(self, '_deletecollection'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_deletecollection'))
- return self._deletecollection(*args, **kwargs)
+ def replace(self, body, **kwargs):
+ self._update_cache(self._replace(body, **kwargs))
+ return self
+
+ def delete(self, **kwargs):
+ self._delete(**kwargs)
+ return self
+
+ def __eq__(self, other):
+ if not isinstance(other, K8sBaseResource):
+ return NotImplemented
+
+ if not isinstance(other, self.__class__):
+ return False
+
+ return self.uid == other.uid
+
+
+class K8sBaseManager(object):
+ resource_class = None
+
+ def __init__(self, cluster):
+ self._cluster = cluster
+
+ @property
+ def resource_type(self):
+ return self.resource_class.resource_type
+
+ def get(self, name=None, namespace=None, data=None):
+ namespace = namespace or self._cluster.default_namespace
+ return self.resource_class(self, name, namespace, data)
+
+ def create(self, name=None, namespace=None, body=None, **kwargs):
+ return self.get(name=name, namespace=namespace).create(body, **kwargs)
+
+ def __resource_from_data(self, data):
+ return self.resource_class(self, data=data)
+
+ def __list_filter(self, items, name_prefix=None):
+ if name_prefix is not None:
+ items = [item for item in items if
+ item.metadata.name.startswith(name_prefix)]
+ return items
+
+ def __list_to_resource(self, items):
+ return [self.__resource_from_data(item) for item in items]
+
+ def list(self, namespace=None, name_prefix=None, **kwargs):
+ namespace = namespace or self._cluster.default_namespace
+ items = self._list(namespace=namespace, **kwargs).items
+ items = self.__list_filter(items, name_prefix=name_prefix)
+ return self.__list_to_resource(items)
+
+ def list_all(self, name_prefix=None, **kwargs):
+ items = self._list_all(**kwargs).items
+ items = self.__list_filter(items, name_prefix=name_prefix)
+ return self.__list_to_resource(items)
+
+
+def read_yaml_str(yaml_str):
+ """ load yaml from string helper """
+ return yaml.safe_load(yaml_str)
+
+
+def read_yaml_file(file_path, *args):
+ """ load yaml from joined file_path and *args helper """
+ with open(os.path.join(file_path, *args)) as f:
+ return yaml.safe_load(f)
+
+
+def read_yaml_url(yaml_file_url):
+ """ load yaml from url helper """
+ return yaml.safe_load(requests.get(yaml_file_url).text)
diff --git a/tcp_tests/managers/k8s/cluster.py b/tcp_tests/managers/k8s/cluster.py
index 4bda03f..8ffb4d1 100644
--- a/tcp_tests/managers/k8s/cluster.py
+++ b/tcp_tests/managers/k8s/cluster.py
@@ -12,19 +12,14 @@
# License for the specific language governing permissions and limitations
-import base64
-import ssl
-
-from k8sclient.client import api_client
-from k8sclient.client.apis import apiv_api
-from k8sclient.client.apis import apisextensionsvbeta_api
-from k8sclient.client.apis import apisbatchv_api
+import kubernetes
+from kubernetes import client
from tcp_tests.managers.k8s.componentstatuses import \
K8sComponentStatusManager
from tcp_tests.managers.k8s.daemonsets import K8sDaemonSetManager
from tcp_tests.managers.k8s.deployments import K8sDeploymentManager
-from tcp_tests.managers.k8s.endpoints import K8sEndpointManager
+from tcp_tests.managers.k8s.endpoints import K8sEndpointsManager
from tcp_tests.managers.k8s.events import K8sEventManager
from tcp_tests.managers.k8s.horizontalpodautoscalers import \
K8sHorizontalPodAutoscalerManager
@@ -46,65 +41,73 @@
K8sServiceAccountManager
from tcp_tests.managers.k8s.services import K8sServiceManager
from tcp_tests.managers.k8s.replicasets import K8sReplicaSetManager
+from tcp_tests.managers.k8s.networkpolicies import K8sNetworkPolicyManager
class K8sCluster(object):
- """docstring for K8sCluster"""
-
- def __init__(self, schema="https", user=None, password=None,
+ def __init__(self, schema="https", user=None, password=None, ca=None,
host='localhost', port='443', default_namespace='default'):
- if user and password:
- auth_string = '%s:%s' % (user, password)
- auth = base64.encodestring(auth_string.encode()).decode()[:-1]
- auth = "Basic {}".format(auth)
- self._client = api_client.ApiClient(
- '{schema}://{host}:{port}'.format(
- schema=schema, host=host, port=port))
- self._client.set_default_header('Authorization', auth)
- restcli_impl = self._client.RESTClient.IMPL
- restcli_impl.ssl_pool_manager.connection_pool_kw['cert_reqs'] = \
- ssl.CERT_NONE
+ self.default_namespace = default_namespace
- else:
- self._client = api_client.ApiClient(
- '{schema}://{host}:{port}'.format(
- schema=schema, host=host, port=port))
- self._api = apiv_api.ApivApi(self._client)
- self._bapi = apisbatchv_api.ApisbatchvApi(self._client)
- self._eapi = apisextensionsvbeta_api.ApisextensionsvbetaApi(
- self._client)
- self._default_namespace = default_namespace
+ api_server = '{0}://{1}:{2}'.format(schema, host, port)
- self.nodes = K8sNodeManager(self._api, self._default_namespace)
- self.pods = K8sPodManager(self._api, self._default_namespace)
- self.endpoints = K8sEndpointManager(self._api, self._default_namespace)
- self.namespaces = K8sNamespaceManager(self._api,
- self._default_namespace)
- self.services = K8sServiceManager(self._api, self._default_namespace)
- self.serviceaccounts = K8sServiceAccountManager(
- self._api, self._default_namespace)
- self.secrets = K8sSecretManager(self._api, self._default_namespace)
- self.events = K8sEventManager(self._api, self._default_namespace)
- self.limitranges = K8sLimitRangeManager(self._api,
- self._default_namespace)
- self.jobs = K8sJobManager(self._bapi, self._default_namespace)
- self.daemonsets = K8sDaemonSetManager(self._eapi,
- self._default_namespace)
- self.ingresses = K8sIngressManager(self._eapi, self._default_namespace)
- self.deployments = K8sDeploymentManager(self._eapi,
- self._default_namespace)
- self.horizontalpodautoscalers = K8sHorizontalPodAutoscalerManager(
- self._eapi, self._default_namespace)
- self.componentstatuses = K8sComponentStatusManager(
- self._api, self._default_namespace)
- self.resourcequotas = K8sResourceQuotaManager(
- self._api, self._default_namespace)
- self.replicationcontrollers = K8sReplicationControllerManager(
- self._api, self._default_namespace)
- self.pvolumeclaims = K8sPersistentVolumeClaimManager(
- self._api, self._default_namespace)
- self.pvolumes = K8sPersistentVolumeManager(
- self._api, self._default_namespace)
- self.replicasets = K8sReplicaSetManager(
- self._eapi, self._default_namespace
- )
+ config_data = {
+ 'apiVersion': 'v1',
+ 'kind': 'Config',
+ 'preferences': {},
+ 'current-context': 'cluster-remote',
+ 'clusters': [{
+ 'name': 'cluster',
+ 'cluster': {
+ 'server': api_server,
+ 'certificate-authority-data': ca,
+ },
+ }],
+ 'users': [{
+ 'name': 'remote',
+ 'user': {
+ 'password': password,
+ 'username': user,
+ },
+ }],
+ 'contexts': [{
+ 'name': 'cluster-remote',
+ 'context': {
+ 'cluster': 'cluster',
+ 'user': 'remote',
+ },
+ }],
+ }
+
+ configuration = type.__call__(client.Configuration)
+ loader = kubernetes.config.kube_config.KubeConfigLoader(config_data)
+ loader.load_and_set(configuration)
+ api_client = client.ApiClient(configuration=configuration)
+
+ self.api_core = client.CoreV1Api(api_client)
+ self.api_apps = client.AppsV1Api(api_client)
+ self.api_extensions = client.ExtensionsV1beta1Api(api_client)
+ self.api_autoscaling = client.AutoscalingV1Api(api_client)
+ self.api_batch = client.BatchV1Api(api_client)
+
+ self.nodes = K8sNodeManager(self)
+ self.pods = K8sPodManager(self)
+ self.endpoints = K8sEndpointsManager(self)
+ self.namespaces = K8sNamespaceManager(self)
+ self.services = K8sServiceManager(self)
+ self.serviceaccounts = K8sServiceAccountManager(self)
+ self.secrets = K8sSecretManager(self)
+ self.events = K8sEventManager(self)
+ self.limitranges = K8sLimitRangeManager(self)
+ self.jobs = K8sJobManager(self)
+ self.daemonsets = K8sDaemonSetManager(self)
+ self.ingresses = K8sIngressManager(self)
+ self.deployments = K8sDeploymentManager(self)
+ self.horizontalpodautoscalers = K8sHorizontalPodAutoscalerManager(self)
+ self.componentstatuses = K8sComponentStatusManager(self)
+ self.resourcequotas = K8sResourceQuotaManager(self)
+ self.replicationcontrollers = K8sReplicationControllerManager(self)
+ self.pvolumeclaims = K8sPersistentVolumeClaimManager(self)
+ self.pvolumes = K8sPersistentVolumeManager(self)
+ self.replicasets = K8sReplicaSetManager(self)
+ self.networkpolicies = K8sNetworkPolicyManager(self)
diff --git a/tcp_tests/managers/k8s/componentstatuses.py b/tcp_tests/managers/k8s/componentstatuses.py
index a991576..7a9c27a 100644
--- a/tcp_tests/managers/k8s/componentstatuses.py
+++ b/tcp_tests/managers/k8s/componentstatuses.py
@@ -17,23 +17,21 @@
class K8sComponentStatus(K8sBaseResource):
- """docstring for K8sComponentStatus"""
+ resource_type = 'componentstatus'
- def __repr__(self):
- return "<K8sComponentStatus: %s>" % self.name
-
- @property
- def name(self):
- return self.metadata.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_component_status(self.name, **kwargs)
class K8sComponentStatusManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sComponentStatus
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_component_status(name=name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_component_status(**kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_component_status(**kwargs)
+
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/daemonsets.py b/tcp_tests/managers/k8s/daemonsets.py
index dc2c5f7..15e6076 100644
--- a/tcp_tests/managers/k8s/daemonsets.py
+++ b/tcp_tests/managers/k8s/daemonsets.py
@@ -12,68 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sDaemonSet(K8sBaseResource):
- """docstring for K8sDaemonSet"""
+ resource_type = 'daemonset'
- def __repr__(self):
- return "<K8sDaemonSet: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_daemon_set(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_daemon_set(
+ self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_daemon_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_daemon_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_daemon_set(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sDaemonSetManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sDaemonSet
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_daemon_set(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_apps
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_daemon_set(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_daemon_set(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_daemon_set(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_daemon_set(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_daemon_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_daemon_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_daemon_set(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def update(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.patch_namespaced_daemon_set(
- body=body, name=name, namespace=namespace, **kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_daemon_set_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/deployments.py b/tcp_tests/managers/k8s/deployments.py
index 5d47d70..3894ac4 100644
--- a/tcp_tests/managers/k8s/deployments.py
+++ b/tcp_tests/managers/k8s/deployments.py
@@ -12,63 +12,56 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
+from devops.helpers import helpers
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sDeployment(K8sBaseResource):
- """docstring for K8sDeployment"""
+ resource_type = 'deployment'
- def __repr__(self):
- return "<K8sDeployment: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_deployment(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_deployment(
+ self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_deployment(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_deployment(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_deployment(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+    def is_ready(self):
+        dep = self.read()
+        return dep.status.replicas is not None and dep.status.available_replicas == dep.status.replicas
+
+ def wait_ready(self, timeout=120, interval=5):
+ helpers.wait(lambda: self.is_ready(),
+ timeout=timeout, interval=interval)
+ return self
class K8sDeploymentManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sDeployment
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_deployment(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_apps
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_deployment(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_deployment(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_deployment(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_deployment(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_deployment(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_deployment(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_deployment(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_deployment_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/endpoints.py b/tcp_tests/managers/k8s/endpoints.py
index ed1066e..0ddb8ae 100644
--- a/tcp_tests/managers/k8s/endpoints.py
+++ b/tcp_tests/managers/k8s/endpoints.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
-class K8sEndpoint(K8sBaseResource):
- """docstring for K8sEndpoint"""
+class K8sEndpoints(K8sBaseResource):
+ resource_type = 'endpoints'
- def __repr__(self):
- return "<K8sEndpoint: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_endpoints(
+ self.name, self.namespace, **kwargs)
+
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_endpoints(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_endpoints(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_endpoints(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_endpoints(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+
+class K8sEndpointsManager(K8sBaseManager):
+ resource_class = K8sEndpoints
@property
- def name(self):
- return self.metadata.name
+ def api(self):
+ return self._cluster.api_core
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_endpoints(namespace, **kwargs)
-class K8sEndpointManager(K8sBaseManager):
- """docstring for ClassName"""
-
- resource_class = K8sEndpoint
-
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_endpoints(
- name=name, namespace=namespace, **kwargs)
-
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_endpoints(
- namespace=namespace, **kwargs)
-
- def _full_list(self, **kwargs):
- return self.api.list_endpoints(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_endpoints(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_endpoints(
- body=body, name=body, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_endpoints(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_endpoints(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_endpoints_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/events.py b/tcp_tests/managers/k8s/events.py
index 099e9e4..a0d865a 100644
--- a/tcp_tests/managers/k8s/events.py
+++ b/tcp_tests/managers/k8s/events.py
@@ -12,58 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sEvent(K8sBaseResource):
- """docstring for K8sEvent"""
+ resource_type = 'event'
- def __repr__(self):
- return "<K8sEvent: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_event(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_event(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_event(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_event(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_event(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sEventManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sEvent
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_event(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_event(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_event(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_event(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_event(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_event(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_event(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_event(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_event_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/horizontalpodautoscalers.py b/tcp_tests/managers/k8s/horizontalpodautoscalers.py
index 6ce78e7..b7842ac 100644
--- a/tcp_tests/managers/k8s/horizontalpodautoscalers.py
+++ b/tcp_tests/managers/k8s/horizontalpodautoscalers.py
@@ -12,59 +12,47 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sHorizontalPodAutoscaler(K8sBaseResource):
- """docstring for K8sHorizontalPodAutoscaler"""
+ resource_type = 'horizontalpodautoscaler'
- def __repr__(self):
- return "<K8sHorizontalPodAutoscaler: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_horizontal_pod_autoscaler(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sHorizontalPodAutoscalerManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sHorizontalPodAutoscaler
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_horizontal_pod_autoscaler(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_autoscaling
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
+ def _list(self, namespace, **kwargs):
return self.api.list_namespaced_horizontal_pod_autoscaler(
- namespace=namespace, **kwargs)
+ namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_horizontal_pod_autoscaler(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_horizontal_pod_autoscaler(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_horizontal_pod_autoscaler(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_horizontal_pod_autoscaler(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_horizontal_pod_autoscaler(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_horizontal_pod_autoscaler_for_all_namespaces(
+ **kwargs)
diff --git a/tcp_tests/managers/k8s/ingresses.py b/tcp_tests/managers/k8s/ingresses.py
index 81b240e..906dc31 100644
--- a/tcp_tests/managers/k8s/ingresses.py
+++ b/tcp_tests/managers/k8s/ingresses.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sIngress(K8sBaseResource):
- """docstring for K8sIngress"""
+ resource_type = 'ingress'
- def __repr__(self):
- return "<K8sIngress: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_ingress(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_ingress(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_ingress(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_ingress(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_ingress(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sIngressManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sIngress
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_ingress(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_extensions
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_ingress(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_ingress(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_ingress(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_ingress(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_ingress(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_ingress(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_ingress(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_ingress_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/jobs.py b/tcp_tests/managers/k8s/jobs.py
index a2dbb81..740a662 100644
--- a/tcp_tests/managers/k8s/jobs.py
+++ b/tcp_tests/managers/k8s/jobs.py
@@ -12,58 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sJob(K8sBaseResource):
- """docstring for K8sJob"""
+ resource_type = 'job'
- def __repr__(self):
- return "<K8sJob: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_job(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_job(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_job(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_job(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_job(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sJobManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sJob
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_job(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_batch
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_job(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_job(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_job(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_job(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_job(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_job(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_job(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_job_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/limitranges.py b/tcp_tests/managers/k8s/limitranges.py
index 7136300..3a9b2de 100644
--- a/tcp_tests/managers/k8s/limitranges.py
+++ b/tcp_tests/managers/k8s/limitranges.py
@@ -12,52 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sLimitRange(K8sBaseResource):
- """docstring for K8sLimitRange"""
+ resource_type = 'limitrange'
- def __repr__(self):
- return "<K8sLimitRange: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_limit_range(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_limit_range(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_limit_range(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_limit_range(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_limit_range(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sLimitRangeManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sLimitRange
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_limit_range(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_limit_range(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_limit_range(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_limit_range(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_limit_range(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_limit_range(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_limit_range(
- namespace=namespace, **kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_limit_range_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/namespaces.py b/tcp_tests/managers/k8s/namespaces.py
index 8c36302..224ae33 100644
--- a/tcp_tests/managers/k8s/namespaces.py
+++ b/tcp_tests/managers/k8s/namespaces.py
@@ -12,40 +12,41 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sNamespace(K8sBaseResource):
- """docstring for ClassName"""
+ resource_type = 'namespace'
- def __repr__(self):
- return "<K8sNamespace: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespace(self.name, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespace(body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespace(self.name, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespace(self.name, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespace(
+ self.name, client.V1DeleteOptions(), **kwargs)
class K8sNamespaceManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sNamespace
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_namespace(name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_namespace(**kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespace(**kwargs)
- def _create(self, body, **kwargs):
- return self.api.create_namespaced_namespace(body, **kwargs)
-
- def _replace(self, body, name, **kwargs):
- return self.api.replace_namespaced_namespace(body, name, **kwargs)
-
- def _delete(self, body, name, **kwargs):
- return self.api.delete_namespaced_namespace(body, name, **kwargs)
-
- def _deletecollection(self, **kwargs):
- return self.api.deletecollection_namespaced_namespace(**kwargs)
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/networkpolicies.py b/tcp_tests/managers/k8s/networkpolicies.py
new file mode 100644
index 0000000..eb92a12
--- /dev/null
+++ b/tcp_tests/managers/k8s/networkpolicies.py
@@ -0,0 +1,56 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+
+
+from kubernetes import client
+
+from tcp_tests.managers.k8s.base import K8sBaseResource
+from tcp_tests.managers.k8s.base import K8sBaseManager
+
+
+class K8sNetworkPolicy(K8sBaseResource):
+ resource_type = 'networkpolicy'
+
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_network_policy(
+ self.name, self.namespace, **kwargs)
+
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_network_policy(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_network_policy(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_network_policy(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_network_policy(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+
+class K8sNetworkPolicyManager(K8sBaseManager):
+ resource_class = K8sNetworkPolicy
+
+ @property
+ def api(self):
+ return self._cluster.api_extensions
+
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_network_policy(namespace, **kwargs)
+
+ def _list_all(self, **kwargs):
+ return self.api.list_network_policy_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/nodes.py b/tcp_tests/managers/k8s/nodes.py
index c6d4dbe..4b0451e 100644
--- a/tcp_tests/managers/k8s/nodes.py
+++ b/tcp_tests/managers/k8s/nodes.py
@@ -12,70 +12,41 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sNode(K8sBaseResource):
- """docstring for ClassName"""
+ resource_type = 'node'
- def __repr__(self):
- return "<K8sNode: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_node(self.name, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_node(body, **kwargs)
- @property
- def labels(self):
- return self.metadata.labels
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_node(self.name, body, **kwargs)
- @labels.setter
- def labels(self, labels):
- current_labels = {
- label: None for label in self.labels
- }
- current_labels.update(labels)
- self.add_labels(labels=current_labels)
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_node(self.name, body, **kwargs)
- def add_labels(self, labels):
- if not isinstance(labels, dict):
- raise TypeError("labels must be a dict!")
- body = {
- "metadata":
- {
- "labels": labels
- }
- }
- self._add_details(self._manager.update(body=body, name=self.name))
-
- def remove_labels(self, list_labels):
- labels = {label: None for label in list_labels}
- self.add_labels(labels=labels)
+ def _delete(self, **kwargs):
+ self._manager.api.delete_node(
+ self.name, client.V1DeleteOptions(), **kwargs)
class K8sNodeManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sNode
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_node(name=name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_node(**kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_node(**kwargs)
- def _create(self, body, **kwargs):
- return self.api.create_namespaced_node(body=body, **kwargs)
-
- def _replace(self, body, name, **kwargs):
- return self.api.replace_namespaced_node(body=body, name=name, **kwargs)
-
- def _delete(self, body, name, **kwargs):
- return self.api.delete_namespaced_node(body=body, name=name, **kwargs)
-
- def _deletecollection(self, **kwargs):
- return self.api.deletecollection_namespaced_node(**kwargs)
-
- def update(self, body, name, **kwargs):
- return self.api.patch_namespaced_node(body=body, name=name, **kwargs)
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/persistentvolumeclaims.py b/tcp_tests/managers/k8s/persistentvolumeclaims.py
index f28f622..b28b869 100644
--- a/tcp_tests/managers/k8s/persistentvolumeclaims.py
+++ b/tcp_tests/managers/k8s/persistentvolumeclaims.py
@@ -12,52 +12,47 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sPersistentVolumeClaim(K8sBaseResource):
- """docstring for K8sPersistentVolumeClaim"""
+ resource_type = 'persistentvolumeclaim'
- def __repr__(self):
- return "<K8sPersistentVolumeClaim: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_persistent_volume_claim(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_persistent_volume_claim(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_persistent_volume_claim(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_persistent_volume_claim(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_persistent_volume_claim(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sPersistentVolumeClaimManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sPersistentVolumeClaim
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_persistent_volume_claim(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
+ def _list(self, namespace, **kwargs):
return self.api.list_namespaced_persistent_volume_claim(
- namespace=namespace, **kwargs)
+ namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_persistent_volume_claim(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_persistent_volume_claim(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_persistent_volume_claim(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_persistent_volume_claim(
- namespace=namespace, **kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_persistent_volume_claim_for_all_namespaces(
+ **kwargs)
diff --git a/tcp_tests/managers/k8s/persistentvolumes.py b/tcp_tests/managers/k8s/persistentvolumes.py
index 8ab7ec2..7424935 100644
--- a/tcp_tests/managers/k8s/persistentvolumes.py
+++ b/tcp_tests/managers/k8s/persistentvolumes.py
@@ -12,46 +12,43 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sPersistentVolume(K8sBaseResource):
- """docstring for K8sPersistentVolume"""
+ resource_type = 'persistentvolume'
- def __repr__(self):
- return "<K8sPersistentVolume: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_persistent_volume(self.name, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_persistent_volume(body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_persistent_volume(
+ self.name, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_persistent_volume(
+ self.name, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_persistent_volume(
+ self.name, client.V1DeleteOptions(), **kwargs)
class K8sPersistentVolumeManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sPersistentVolume
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_persistent_volume(
- name=name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_persistent_volume(
- **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_persistent_volume(**kwargs)
- def _create(self, body, **kwargs):
- return self.api.create_namespaced_persistent_volume(
- body, **kwargs)
-
- def _replace(self, body, name, **kwargs):
- return self.api.replace_namespaced_persistent_volume(
- body=body, name=name, **kwargs)
-
- def _delete(self, body, name, **kwargs):
- return self.api.delete_namespaced_persistent_volume(
- body=body, name=name, **kwargs)
-
- def _deletecollection(self, **kwargs):
- return self.api.deletecollection_namespaced_persistent_volume(
- **kwargs)
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/pods.py b/tcp_tests/managers/k8s/pods.py
index 94abc20..98192a6 100644
--- a/tcp_tests/managers/k8s/pods.py
+++ b/tcp_tests/managers/k8s/pods.py
@@ -11,6 +11,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
+
+from kubernetes import client
+
from devops.helpers import helpers
from tcp_tests.managers.k8s.base import K8sBaseResource
@@ -18,91 +21,51 @@
class K8sPod(K8sBaseResource):
- """docstring for K8sPod"""
+ resource_type = 'pod'
- def __repr__(self):
- return "<K8sPod: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_pod(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_pod(
+ self.namespace, body, **kwargs)
- @property
- def phase(self):
- return self.status.phase
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_pod(
+ self.name, self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_pod(
+ self.name, self.namespace, body, **kwargs)
- def wait_phase(self, phase, timeout=60, interval=5):
- """Wait phase of pod_name from namespace while timeout
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_pod(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
- :param list or str: phase
- :param int: timeout
+ def wait_phase(self, phases, timeout=60, interval=3):
+ if isinstance(phases, str):
+ phases = [phases]
- :rtype: None
- """
- if isinstance(phase, str):
- phase = [phase]
+ helpers.wait(lambda: self.read().status.phase in phases,
+ timeout=timeout, interval=interval,
+ timeout_msg='Timeout waiting, pod {0} phase is not in '
+ '"{1}"'.format(self.name, phases))
+ return self
- def check():
- self._add_details(self._manager.get(name=self.name,
- namespace=self.namespace))
- return self.phase in phase
-
- helpers.wait(check, timeout=timeout, interval=interval,
- timeout_msg='Timeout waiting({timeout}s), pod {pod_name} '
- 'is not in "{phase}" phase'.format(
- timeout=timeout,
- pod_name=self.name,
- phase=phase))
-
- def wait_running(self, timeout=60, interval=5):
- self.wait_phase(['Running'], timeout=timeout, interval=interval)
+ def wait_running(self, timeout=240, interval=3):
+ return self.wait_phase('Running', timeout=timeout, interval=interval)
class K8sPodManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sPod
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_pod(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_pod(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_pod(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_pod(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_pod(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- # NOTE: the following two lines should be deleted after
- # serialization is fixed in python-k8sclient
- if isinstance(body, self.resource_class):
- body = body.swagger_types
- return self.api.delete_namespaced_pod(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_pod(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_pod(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_pod_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/replicasets.py b/tcp_tests/managers/k8s/replicasets.py
index 31b6db6..7132157 100644
--- a/tcp_tests/managers/k8s/replicasets.py
+++ b/tcp_tests/managers/k8s/replicasets.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sReplicaSet(K8sBaseResource):
- """docstring for K8sPod"""
+ resource_type = 'replicaset'
- def __repr__(self):
- return "<K8sPod: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_replica_set(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_replica_set(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_replica_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_replica_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_replica_set(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sReplicaSetManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sReplicaSet
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_replica_set(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_apps
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_replica_set(namespace=namespace,
- **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_replica_set(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_replica_set(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_replica_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_replica_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_replica_set(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_replica_set(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_replica_set_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/replicationcontrollers.py b/tcp_tests/managers/k8s/replicationcontrollers.py
index 6cf7da4..aabb092 100644
--- a/tcp_tests/managers/k8s/replicationcontrollers.py
+++ b/tcp_tests/managers/k8s/replicationcontrollers.py
@@ -12,59 +12,47 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sReplicationController(K8sBaseResource):
- """docstring for K8sReplicationController"""
+ resource_type = 'replicationcontroller'
- def __repr__(self):
- return "<K8sReplicationController: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_replication_controller(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_replication_controller(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_replication_controller(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_replication_controller(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_replication_controller(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sReplicationControllerManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sReplicationController
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_replication_controller(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
+ def _list(self, namespace, **kwargs):
return self.api.list_namespaced_replication_controller(
- namespace=namespace, **kwargs)
+ namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_replication_controller(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_replication_controller(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_replication_controller(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_replication_controller(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_replication_controller(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_replication_controller_for_all_namespaces(
+ **kwargs)
diff --git a/tcp_tests/managers/k8s/resourcequotas.py b/tcp_tests/managers/k8s/resourcequotas.py
index 49d81d5..2edf00d 100644
--- a/tcp_tests/managers/k8s/resourcequotas.py
+++ b/tcp_tests/managers/k8s/resourcequotas.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sResourceQuota(K8sBaseResource):
- """docstring for K8sResourceQuota"""
+ resource_type = 'resourcequota'
- def __repr__(self):
- return "<K8sResourceQuota: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_resource_quota(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_resource_quota(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_resource_quota(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_resource_quota(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_resource_quota(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sResourceQuotaManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sResourceQuota
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_resource_quota(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_resource_quota(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_resource_quota(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_resource_quota(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_resource_quota(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_resource_quota(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_resource_quota(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_resourse_quota(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_resource_quota_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/secrets.py b/tcp_tests/managers/k8s/secrets.py
index 355c884..474c119 100644
--- a/tcp_tests/managers/k8s/secrets.py
+++ b/tcp_tests/managers/k8s/secrets.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sSecret(K8sBaseResource):
- """docstring for K8sSecret"""
+ resource_type = 'secret'
- def __repr__(self):
- return "<K8sSecret: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_secret(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_secret(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_secret(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_secret(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_secret(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sSecretManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sSecret
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_secret(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_secret(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_secret(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_secret(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_secret(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_secret(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_secret(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_secret(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_secret_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/serviceaccounts.py b/tcp_tests/managers/k8s/serviceaccounts.py
index bf58b4c..3b779eb 100644
--- a/tcp_tests/managers/k8s/serviceaccounts.py
+++ b/tcp_tests/managers/k8s/serviceaccounts.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sServiceAccount(K8sBaseResource):
- """docstring for K8sServiceAccount"""
+ resource_type = 'serviceaccount'
- def __repr__(self):
- return "<K8sServiceAccount: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_service_account(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_service_account(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_service_account(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_service_account(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_service_account(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sServiceAccountManager(K8sBaseManager):
- """docstring for K8sServiceAccountManager"""
-
resource_class = K8sServiceAccount
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_service_account(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_service_account(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_service_account(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_service_account(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_service_account(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_service_account(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_service_account(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_service_account(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_service_account_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/services.py b/tcp_tests/managers/k8s/services.py
index 97a6be7..6c4a22b 100644
--- a/tcp_tests/managers/k8s/services.py
+++ b/tcp_tests/managers/k8s/services.py
@@ -12,57 +12,51 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sService(K8sBaseResource):
- """docstring for K8sService"""
+ resource_type = 'service'
- def __repr__(self):
- return "<K8sService: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_service(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_service(
+ self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_service(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_service(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_service(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+ def get_ip(self, external=False):
+ if external:
+ return self.read().status.load_balancer.ingress[0].ip
+ else:
+ return self.read().spec.cluster_ip
class K8sServiceManager(K8sBaseManager):
- """docstring for K8sServiceManager"""
-
resource_class = K8sService
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_service(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_service(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_service(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_service(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_service(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_service(
- name=name, namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_service(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_service_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 9b5588d..c260926 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -13,11 +13,7 @@
# under the License.
import os
-import time
-from uuid import uuid4
-import six
import requests
-import yaml
from devops.helpers import helpers
from devops.error import DevopsCalledProcessError
@@ -27,7 +23,6 @@
from tcp_tests.helpers.utils import retry
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
from tcp_tests.managers.k8s import cluster
-from k8sclient.client.rest import ApiException
LOG = logger.logger
@@ -42,9 +37,10 @@
self.__config = config
self.__underlay = underlay
self._salt = salt
- self._api_client = None
- super(K8SManager, self).__init__(
- config=config, underlay=underlay)
+ self._api = None
+ self.kubectl = K8SKubectlCli(self)
+ self.virtlet = K8SVirtlet(self)
+ super(K8SManager, self).__init__(config=config, underlay=underlay)
def install(self, commands):
self.execute_commands(commands,
@@ -75,602 +71,81 @@
"nodes with K8s master").format(k8s_host, k8s_proxy_ip)
return k8s_host
+ def _api_init(self):
+ ca_result = self.controller_check_call(
+ 'base64 --wrap=0 /etc/kubernetes/ssl/ca-kubernetes.crt')
+
+ self._api = cluster.K8sCluster(
+ user=self.__config.k8s_deploy.kubernetes_admin_user,
+ password=self.__config.k8s_deploy.kubernetes_admin_password,
+ ca=ca_result['stdout'][0],
+ host=self.__config.k8s.kube_host,
+ port=self.__config.k8s.kube_apiserver_port)
+
@property
def api(self):
- if self._api_client is None:
- self._api_client = cluster.K8sCluster(
- user=self.__config.k8s_deploy.kubernetes_admin_user,
- password=self.__config.k8s_deploy.kubernetes_admin_password,
- host=self.__config.k8s.kube_host,
- port=self.__config.k8s.kube_apiserver_port,
- default_namespace='default')
- return self._api_client
+ """
+ :rtype: cluster.K8sCluster
+ """
+ if self._api is None:
+ self._api_init()
+ return self._api
- def ctl_hosts(self):
+ def get_controllers(self):
+ """ Return list of controllers ssh underlays """
return [node for node in self.__config.underlay.ssh if
ext.UNDERLAY_NODE_ROLES.k8s_controller in node['roles']]
+ def get_masters(self):
+ """ Return list of kubernetes masters hosts fqdn """
+ masters_fqdn = self._salt.get_pillar(
+ tgt='I@kubernetes:master', pillar='linux:network:fqdn')
+ return [self.__underlay.host_by_node_name(node_name=v)
+ for pillar in masters_fqdn for k, v in pillar.items()]
+
@property
- def ctl_host(self):
- return self.ctl_hosts()[0]['node_name']
+ def controller_name(self):
+ """ Return node name of controller node that used for all actions """
+ names = [node['node_name'] for node in self.get_controllers()]
+ # we want to return same controller name every time
+ names.sort()
+ return names[0]
- def get_pod_phase(self, pod_name, namespace=None):
- return self.api.pods.get(
- name=pod_name, namespace=namespace).phase
-
- def wait_pod_phase(self, pod_name, phase, namespace=None, timeout=60):
- """Wait phase of pod_name from namespace while timeout
-
- :param str: pod_name
- :param str: namespace
- :param list or str: phase
- :param int: timeout
-
- :rtype: None
- """
- if isinstance(phase, str):
- phase = [phase]
-
- def check():
- return self.get_pod_phase(pod_name, namespace) in phase
-
- helpers.wait(check, timeout=timeout,
- timeout_msg='Timeout waiting, pod {pod_name} is not in '
- '"{phase}" phase'.format(
- pod_name=pod_name, phase=phase))
-
- def wait_pods_phase(self, pods, phase, timeout=60):
- """Wait timeout seconds for phase of pods
-
- :param pods: list of K8sPod
- :param phase: list or str
- :param timeout: int
-
- :rtype: None
- """
- if isinstance(phase, str):
- phase = [phase]
-
- def check(pod_name, namespace):
- return self.get_pod_phase(pod_name, namespace) in phase
-
- def check_all_pods():
- return all(check(pod.name, pod.metadata.namespace) for pod in pods)
-
- helpers.wait(
- check_all_pods,
- timeout=timeout,
- timeout_msg='Timeout waiting, pods {0} are not in "{1}" '
- 'phase'.format([pod.name for pod in pods], phase))
-
- def check_pod_create(self, body, namespace=None, timeout=300, interval=5):
- """Check creating sample pod
-
- :param k8s_pod: V1Pod
- :param namespace: str
- :rtype: V1Pod
- """
- LOG.info("Creating pod in k8s cluster")
- if isinstance(body, six.string_types):
- body = yaml.load(body)
- LOG.debug(
- "POD spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- LOG.debug("Timeout for creation is set to {}".format(timeout))
- LOG.debug("Checking interval is set to {}".format(interval))
- pod = self.api.pods.create(body=body, namespace=namespace)
- pod.wait_running(timeout=timeout, interval=interval)
- LOG.info("Pod '{0}' is created in '{1}' namespace".format(
- pod.name, pod.namespace))
- return self.api.pods.get(name=pod.name, namespace=pod.namespace)
-
- def wait_pod_deleted(self, podname, timeout=60, interval=5):
- helpers.wait(
- lambda: podname not in [pod.name for pod in self.api.pods.list()],
- timeout=timeout,
- interval=interval,
- timeout_msg="Pod deletion timeout reached!"
- )
-
- def check_pod_delete(self, k8s_pod, timeout=300, interval=5,
- namespace=None):
- """Deleting pod from k8s
-
- :param k8s_pod: tcp_tests.managers.k8s.nodes.K8sNode
- :param k8sclient: tcp_tests.managers.k8s.cluster.K8sCluster
- """
- LOG.info("Deleting pod '{}'".format(k8s_pod.name))
- LOG.debug("Pod status:\n{}".format(k8s_pod.status))
- LOG.debug("Timeout for deletion is set to {}".format(timeout))
- LOG.debug("Checking interval is set to {}".format(interval))
- self.api.pods.delete(body=k8s_pod, name=k8s_pod.name,
- namespace=namespace)
- self.wait_pod_deleted(k8s_pod.name, timeout, interval)
- LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
-
- def check_service_create(self, body, namespace=None):
- """Check creating k8s service
-
- :param body: dict, service spec
- :param namespace: str
- :rtype: K8sService object
- """
- LOG.info("Creating service in k8s cluster")
- LOG.debug(
- "Service spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- service = self.api.services.create(body=body, namespace=namespace)
- LOG.info("Service '{0}' is created in '{1}' namespace".format(
- service.name, service.namespace))
- return self.api.services.get(name=service.name,
- namespace=service.namespace)
-
- def check_ds_create(self, body, namespace=None):
- """Check creating k8s DaemonSet
-
- :param body: dict, DaemonSet spec
- :param namespace: str
- :rtype: K8sDaemonSet object
- """
- LOG.info("Creating DaemonSet in k8s cluster")
- LOG.debug(
- "DaemonSet spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- ds = self.api.daemonsets.create(body=body, namespace=namespace)
- LOG.info("DaemonSet '{0}' is created in '{1}' namespace".format(
- ds.name, ds.namespace))
- return self.api.daemonsets.get(name=ds.name, namespace=ds.namespace)
-
- def check_ds_ready(self, dsname, namespace=None):
- """Check if k8s DaemonSet is ready
-
- :param dsname: str, ds name
- :return: bool
- """
- ds = self.api.daemonsets.get(name=dsname, namespace=namespace)
- return (ds.status.current_number_scheduled ==
- ds.status.desired_number_scheduled)
-
- def wait_ds_ready(self, dsname, namespace=None, timeout=60, interval=5):
- """Wait until all pods are scheduled on nodes
-
- :param dsname: str, ds name
- :param timeout: int
- :param interval: int
- """
- helpers.wait(
- lambda: self.check_ds_ready(dsname, namespace=namespace),
- timeout=timeout, interval=interval)
-
- def check_deploy_create(self, body, namespace=None):
- """Check creating k8s Deployment
-
- :param body: dict, Deployment spec
- :param namespace: str
- :rtype: K8sDeployment object
- """
- LOG.info("Creating Deployment in k8s cluster")
- LOG.debug(
- "Deployment spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- deploy = self.api.deployments.create(body=body, namespace=namespace)
- LOG.info("Deployment '{0}' is created in '{1}' namespace".format(
- deploy.name, deploy.namespace))
- return self.api.deployments.get(name=deploy.name,
- namespace=deploy.namespace)
-
- def check_deploy_ready(self, deploy_name, namespace=None):
- """Check if k8s Deployment is ready
-
- :param deploy_name: str, deploy name
- :return: bool
- """
- deploy = self.api.deployments.get(name=deploy_name,
- namespace=namespace)
- return deploy.status.available_replicas == deploy.status.replicas
-
- def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60,
- interval=5):
- """Wait until all pods are scheduled on nodes
-
- :param deploy_name: str, deploy name
- :param timeout: int
- :param interval: int
- """
- helpers.wait(
- lambda: self.check_deploy_ready(deploy_name, namespace=namespace),
- timeout=timeout, interval=interval)
-
- def check_namespace_create(self, name):
- """Check creating k8s Namespace
-
- :param name: str
- :rtype: K8sNamespace object
- """
- try:
- ns = self.api.namespaces.get(name=name)
- LOG.info("Namespace '{0}' is already exists".format(ns.name))
- except ApiException as e:
- if hasattr(e, "status") and 404 == e.status:
- LOG.info("Creating Namespace in k8s cluster")
- ns = self.api.namespaces.create(
- body={'metadata': {'name': name}})
- LOG.info("Namespace '{0}' is created".format(ns.name))
- # wait 10 seconds until a token for new service account
- # is created
- time.sleep(10)
- ns = self.api.namespaces.get(name=ns.name)
- else:
- raise
- return ns
-
- def create_objects(self, path):
- if isinstance(path, str):
- path = [path]
- params = ' '.join(["-f {}".format(p) for p in path])
- cmd = 'kubectl create {params}'.format(params=params)
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- LOG.info("Running command '{cmd}' on node {node}".format(
- cmd=cmd,
- node=remote.hostname)
- )
- result = remote.check_call(cmd)
- LOG.info(result['stdout'])
-
- def get_running_pods(self, pod_name, namespace=None):
- pods = [pod for pod in self.api.pods.list(namespace=namespace)
- if (pod_name in pod.name and pod.status.phase == 'Running')]
- return pods
-
- def get_pods_number(self, pod_name, namespace=None):
- pods = self.get_running_pods(pod_name, namespace)
- return len(pods)
-
- def get_running_pods_by_ssh(self, pod_name, namespace=None):
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- result = remote.check_call("kubectl get pods --namespace {} |"
- " grep {} | awk '{{print $1 \" \""
- " $3}}'".format(namespace,
- pod_name))['stdout']
- running_pods = [data.strip().split()[0] for data in result
- if data.strip().split()[1] == 'Running']
- return running_pods
-
- def get_pods_restarts(self, pod_name, namespace=None):
- pods = [pod.status.container_statuses[0].restart_count
- for pod in self.get_running_pods(pod_name, namespace)]
- return sum(pods)
-
- def run_conformance(self, timeout=60 * 60, log_out='k8s_conformance.log',
- raise_on_err=True, node_name=None,
- api_server='http://127.0.0.1:8080'):
- if node_name is None:
- node_name = self.ctl_host
- cmd = "set -o pipefail; docker run --net=host -e API_SERVER="\
- "'{api}' {image} | tee '{log}'".format(
- api=api_server, image=self.__config.k8s.k8s_conformance_image,
- log=log_out)
+ def controller_check_call(self, cmd, **kwargs):
+ """ Run command on controller and return result """
+ LOG.info("running cmd on k8s controller: {}".format(cmd))
return self.__underlay.check_call(
- cmd=cmd, node_name=node_name, timeout=timeout,
- raise_on_err=raise_on_err)
+ cmd=cmd, node_name=self.controller_name, **kwargs)
- def get_k8s_masters(self):
- k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master',
- pillar='linux:network:fqdn')
- return [self._K8SManager__underlay.host_by_node_name(node_name=v)
- for pillar in k8s_masters_fqdn for k, v in pillar.items()]
-
- def kubectl_run(self, name, image, port, replicas=None):
- cmd = "kubectl run {0} --image={1} --port={2}".format(
- name, image, port)
- if replicas is not None:
- cmd += " --replicas={}".format(replicas)
- return self.__underlay.check_call(cmd=cmd, node_name=self.ctl_host)
-
- def kubectl_expose(self, resource, name, port, type, target_name=None):
- cmd = "kubectl expose {0} {1} --port={2} --type={3}".format(
- resource, name, port, type)
- if target_name is not None:
- cmd += " --name={}".format(target_name)
- return self.__underlay.check_call(cmd=cmd, node_name=self.ctl_host)
-
- def kubectl_annotate(self, resource, name, annotation):
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- result = remote.check_call(
- "kubectl annotate {0} {1} {2}".format(
- resource, name, annotation
- )
- )
- return result
-
- def get_svc_ip(self, name, namespace='kube-system', external=False):
- cmd = "kubectl get svc {0} -n {1} | awk '{{print ${2}}}' | tail -1".\
- format(name, namespace, 4 if external else 3)
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return result['stdout'][0].strip()
-
- @retry(300, exception=DevopsCalledProcessError)
- def nslookup(self, host, src):
- self.__underlay.check_call(
- "nslookup {0} {1}".format(host, src), node_name=self.ctl_host)
-
- @retry(300, exception=DevopsCalledProcessError)
- def curl(self, url):
+ def get_keepalived_vip(self):
"""
- Run curl on controller and return stdout
+ Return k8s VIP IP address
- :param url: url to curl
- :return: response string
+ :return: str, IP address
"""
- result = self.__underlay.check_call(
- "curl -s -S \"{}\"".format(url), node_name=self.ctl_host)
- LOG.debug("curl \"{0}\" result: {1}".format(url, result['stdout']))
- return result['stdout']
+ ctl_vip_pillar = self._salt.get_pillar(
+ tgt="I@kubernetes:control:enabled:True",
+ pillar="_param:cluster_vip_address")[0]
+ return ctl_vip_pillar.values()[0]
-# ---------------------------- Virtlet methods -------------------------------
- def install_jq(self):
- """Install JQuery on node. Required for changing yamls on the fly.
+ def run_sample_deployment(self, name, **kwargs):
+ return K8SSampleDeployment(self, name, **kwargs)
- :return:
+ def get_pod_ips_from_container(self, pod_name, exclude_local=True,
+ namespace='default'):
+ """ Get ips from container using 'ip a'
+ Required for cni-genie multi-cni cases
+
+        :return: list of IP addresses
"""
- cmd = "apt install jq -y"
- return self.__underlay.check_call(cmd, node_name=self.ctl_host)
+ cmd = "ip a|grep \"inet \"|awk '{{print $2}}'"
+ result = self.kubectl.cli_exec(namespace, pod_name, cmd)['stdout']
+ ips = [line.strip().split('/')[0] for line in result]
+ if exclude_local:
+ ips = [ip for ip in ips if not ip.startswith("127.")]
+ return ips
- def git_clone(self, project, target):
- cmd = "git clone {0} {1}".format(project, target)
- return self.__underlay.check_call(cmd, node_name=self.ctl_host)
-
- def run_vm(self, name=None, yaml_path='~/virtlet/examples/cirros-vm.yaml'):
- if not name:
- name = 'virtlet-vm-{}'.format(uuid4())
- cmd = (
- "kubectl convert -f {0} --local "
- "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -")
- self.__underlay.check_call(cmd.format(yaml_path, name),
- node_name=self.ctl_host)
- return name
-
- def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
- cmd = "kubectl get po {} -n default".format(name)
- if jsonpath:
- cmd += " -o jsonpath={}".format(jsonpath)
- return self.__underlay.check_call(
- cmd, node_name=self.ctl_host, expected=expected)
-
- def wait_active_state(self, name, timeout=180):
- helpers.wait(
- lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
- timeout=timeout,
- timeout_msg="VM {} didn't Running state in {} sec. "
- "Current state: ".format(
- name, timeout, self.get_vm_info(name)['stdout'][0]))
-
- def delete_vm(self, name, timeout=180):
- cmd = "kubectl delete po -n default {}".format(name)
- self.__underlay.check_call(cmd, node_name=self.ctl_host)
-
- helpers.wait(
- lambda:
- "Error from server (NotFound):" in
- " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
- timeout=timeout,
- timeout_msg="VM {} didn't Running state in {} sec. "
- "Current state: ".format(
- name, timeout, self.get_vm_info(name)['stdout'][0]))
-
- def adjust_cirros_resources(
- self, cpu=2, memory='256',
- target_yaml='virtlet/examples/cirros-vm-exp.yaml'):
- # We will need to change params in case of example change
- cmd = ("cd ~/virtlet/examples && "
- "cp cirros-vm.yaml {2} && "
- "sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/ "
- "\1VirtletVCPUCount: \"{0}\"/' {2} && "
- "sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: "
- "{1}Mi/' {2}".format(cpu, memory, target_yaml))
- self.__underlay.check_call(cmd, node_name=self.ctl_host)
-
- def get_domain_name(self, vm_name):
- cmd = ("~/virtlet/examples/virsh.sh list --name | "
- "grep -i {0} ".format(vm_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return result['stdout'].strip()
-
- def get_vm_cpu_count(self, domain_name):
- cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
- "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return int(result['stdout'].strip())
-
- def get_vm_memory_count(self, domain_name):
- cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
- "grep 'memory unit' | "
- "grep -o '[[:digit:]]*'".format(domain_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return int(result['stdout'].strip())
-
- def get_domain_id(self, domain_name):
- cmd = ("virsh dumpxml {} | grep id=\' | "
- "grep -o [[:digit:]]*".format(domain_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return int(result['stdout'].strip())
-
- def list_vm_volumes(self, domain_name):
- domain_id = self.get_domain_id(domain_name)
- cmd = ("~/virtlet/examples/virsh.sh domblklist {} | "
- "tail -n +3 | awk {{'print $2'}}".format(domain_id))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return result['stdout'].strip()
-
- def run_virtlet_conformance(self, timeout=60 * 120,
- log_file='virtlet_conformance.log'):
- if self.__config.k8s.run_extended_virtlet_conformance:
- ci_image = "cloud-images.ubuntu.com/xenial/current/" \
- "xenial-server-cloudimg-amd64-disk1.img"
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -junitOutput report.xml "
- "-image {2} -sshuser ubuntu -memoryLimit 1024 "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
- self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file, ci_image))
- else:
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-junitOutput report.xml "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
- self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file))
- LOG.info("Executing: {}".format(cmd))
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- result = remote.check_call(cmd, timeout=timeout)
- stderr = result['stderr']
- stdout = result['stdout']
- LOG.info("Test results stdout: {}".format(stdout))
- LOG.info("Test results stderr: {}".format(stderr))
- return result
-
- def start_k8s_cncf_verification(self, timeout=60 * 90):
- cncf_cmd = ("curl -L https://raw.githubusercontent.com/cncf/"
- "k8s-conformance/master/sonobuoy-conformance.yaml"
- " | kubectl apply -f -")
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- remote.check_call(cncf_cmd, timeout=60)
- self.wait_pod_phase('sonobuoy', 'Running',
- namespace='sonobuoy', timeout=120)
- wait_cmd = ('kubectl logs -n sonobuoy sonobuoy | '
- 'grep "sonobuoy is now blocking"')
-
- expected = [0, 1]
- helpers.wait(
- lambda: remote.check_call(
- wait_cmd, expected=expected).exit_code == 0,
- interval=30, timeout=timeout,
- timeout_msg="Timeout for CNCF reached."
- )
-
- def extract_file_to_node(self, system='docker',
- container='virtlet',
- file_path='report.xml',
- out_dir='.',
- **kwargs):
- """
- Download file from docker or k8s container to node
-
- :param system: docker or k8s
- :param container: Full name of part of name
- :param file_path: File path in container
- :param kwargs: Used to control pod and namespace
- :param out_dir: Output directory
- :return:
- """
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- if system is 'docker':
- cmd = ("docker ps --all | grep \"{0}\" |"
- " awk '{{print $1}}'".format(container))
- result = remote.check_call(cmd, raise_on_err=False)
- if result['stdout']:
- container_id = result['stdout'][0].strip()
- else:
- LOG.info('No container found, skipping extraction...')
- return
- cmd = "docker start {}".format(container_id)
- remote.check_call(cmd, raise_on_err=False)
- cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
- container_id, file_path, out_dir)
- remote.check_call(cmd, raise_on_err=False)
- else:
- # system is k8s
- pod_name = kwargs.get('pod_name')
- pod_namespace = kwargs.get('pod_namespace')
- cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
- pod_namespace, pod_name, file_path, out_dir)
- remote.check_call(cmd, raise_on_err=False)
-
- def download_k8s_logs(self, files):
- """
- Download JUnit report and conformance logs from cluster
- :param files:
- :return:
- """
- master_host = self.__config.salt.salt_master_host
- with self.__underlay.remote(host=master_host) as r:
- for log_file in files:
- cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
- self.ctl_host, log_file)
- r.check_call(cmd, raise_on_err=False)
- LOG.info("Downloading the artifact {0}".format(log_file))
- r.download(destination=log_file, target=os.getcwd())
-
- def combine_xunit(self, path, output):
- """
- Function to combine multiple xmls with test results to
- one.
-
- :param path: Path where xmls to combine located
- :param output: Path to xml file where output will stored
- :return:
- """
- with self.__underlay.remote(node_name=self.ctl_host) as r:
- cmd = ("apt-get install python-setuptools -y; "
- "pip install "
- "https://github.com/mogaika/xunitmerge/archive/master.zip")
- LOG.debug('Installing xunitmerge')
- r.check_call(cmd, raise_on_err=False)
- LOG.debug('Merging xunit')
- cmd = ("cd {0}; arg = ''; "
- "for i in $(ls | grep xml); "
- "do arg=\"$arg $i\"; done && "
- "xunitmerge $arg {1}".format(path, output))
- r.check_call(cmd, raise_on_err=False)
-
- def manage_cncf_archive(self):
- """
- Function to untar archive, move files, that we are needs to the
- home folder, prepare it to downloading and clean the trash.
- Will generate files: e2e.log, junit_01.xml, cncf_results.tar.gz
- and version.txt
- :return:
- """
-
- # Namespace and pod name may be hardcoded since this function is
- # very specific for cncf and cncf is not going to change
- # those launch pod name and namespace.
- get_tar_name_cmd = ("kubectl logs -n sonobuoy sonobuoy | "
- "grep 'Results available' | "
- "sed 's/.*\///' | tr -d '\"'")
-
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- tar_name = remote.check_call(get_tar_name_cmd)['stdout'][0].strip()
- untar = "mkdir result && tar -C result -xzf {0}".format(tar_name)
- remote.check_call(untar, raise_on_err=False)
- manage_results = ("mv result/plugins/e2e/results/e2e.log . && "
- "mv result/plugins/e2e/results/junit_01.xml . ;"
- "kubectl version > version.txt")
- remote.check_call(manage_results, raise_on_err=False)
- cleanup_host = "rm -rf result"
- remote.check_call(cleanup_host, raise_on_err=False)
- # This one needed to use download fixture, since I don't know
- # how possible apply fixture arg dynamically from test.
- rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
- remote.check_call(rename_tar, raise_on_err=False)
-
- def update_k8s_images(self, tag):
+ def update_k8s_version(self, tag):
"""
Update k8s images tag version in cluster meta and apply required
for update states
@@ -705,73 +180,333 @@
self.execute_commands(
update_commands, label="Updating kubernetes to '{}'".format(tag))
- def get_keepalived_vip(self):
+ def run_conformance(self, timeout=60*60, log_out='k8s_conformance.log',
+ raise_on_err=True, node_name=None,
+ api_server='http://127.0.0.1:8080'):
+ if node_name is None:
+ node_name = self.controller_name
+ cmd = "set -o pipefail; docker run --net=host " \
+ "-e API_SERVER='{api}' {image} | tee '{log}'".format(
+ api=api_server, log=log_out,
+ image=self.__config.k8s.k8s_conformance_image)
+ return self.__underlay.check_call(
+ cmd=cmd, node_name=node_name, timeout=timeout,
+ raise_on_err=raise_on_err)
+
+ def run_virtlet_conformance(self, timeout=60 * 120,
+ log_file='virtlet_conformance.log'):
+ if self.__config.k8s.run_extended_virtlet_conformance:
+ ci_image = "cloud-images.ubuntu.com/xenial/current/" \
+ "xenial-server-cloudimg-amd64-disk1.img"
+ cmd = ("set -o pipefail; "
+ "docker run --net=host {0} /virtlet-e2e-tests "
+ "-include-cloud-init-tests -junitOutput report.xml "
+ "-image {2} -sshuser ubuntu -memoryLimit 1024 "
+ "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ "-ginkgo.focus '\[Conformance\]' "
+ "| tee {1}".format(
+ self.__config.k8s_deploy.kubernetes_virtlet_image,
+ log_file, ci_image))
+ else:
+ cmd = ("set -o pipefail; "
+ "docker run --net=host {0} /virtlet-e2e-tests "
+ "-junitOutput report.xml "
+ "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ "-ginkgo.focus '\[Conformance\]' "
+ "| tee {1}".format(
+ self.__config.k8s_deploy.kubernetes_virtlet_image,
+ log_file))
+ LOG.info("Executing: {}".format(cmd))
+ with self.__underlay.remote(
+ node_name=self.controller_name) as remote:
+ result = remote.check_call(cmd, timeout=timeout)
+ stderr = result['stderr']
+ stdout = result['stdout']
+ LOG.info("Test results stdout: {}".format(stdout))
+ LOG.info("Test results stderr: {}".format(stderr))
+ return result
+
+ def start_k8s_cncf_verification(self, timeout=60 * 90):
+ cncf_cmd = ("curl -L https://raw.githubusercontent.com/cncf/"
+ "k8s-conformance/master/sonobuoy-conformance.yaml"
+ " | kubectl apply -f -")
+ with self.__underlay.remote(
+ node_name=self.controller_name) as remote:
+ remote.check_call(cncf_cmd, timeout=60)
+ self.wait_pod_phase('sonobuoy', 'Running',
+ namespace='sonobuoy', timeout=120)
+ wait_cmd = ('kubectl logs -n sonobuoy sonobuoy | '
+ 'grep "sonobuoy is now blocking"')
+
+ expected = [0, 1]
+ helpers.wait(
+ lambda: remote.check_call(
+ wait_cmd, expected=expected).exit_code == 0,
+ interval=30, timeout=timeout,
+ timeout_msg="Timeout for CNCF reached."
+ )
+
+ def extract_file_to_node(self, system='docker',
+ container='virtlet',
+ file_path='report.xml',
+ out_dir='.',
+ **kwargs):
"""
- Return k8s VIP IP address
+ Download file from docker or k8s container to node
- :return: str, IP address
+ :param system: docker or k8s
+ :param container: Full name of part of name
+ :param file_path: File path in container
+ :param kwargs: Used to control pod and namespace
+ :param out_dir: Output directory
+ :return:
"""
- ctl_vip_pillar = self._salt.get_pillar(
- tgt="I@kubernetes:control:enabled:True",
- pillar="_param:cluster_vip_address")[0]
- return ctl_vip_pillar.values()[0]
+ with self.__underlay.remote(
+ node_name=self.controller_name) as remote:
+            if system == 'docker':
+ cmd = ("docker ps --all | grep \"{0}\" |"
+ " awk '{{print $1}}'".format(container))
+ result = remote.check_call(cmd, raise_on_err=False)
+ if result['stdout']:
+ container_id = result['stdout'][0].strip()
+ else:
+ LOG.info('No container found, skipping extraction...')
+ return
+ cmd = "docker start {}".format(container_id)
+ remote.check_call(cmd, raise_on_err=False)
+ cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
+ container_id, file_path, out_dir)
+ remote.check_call(cmd, raise_on_err=False)
+ else:
+ # system is k8s
+ pod_name = kwargs.get('pod_name')
+ pod_namespace = kwargs.get('pod_namespace')
+ cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
+ pod_namespace, pod_name, file_path, out_dir)
+ remote.check_call(cmd, raise_on_err=False)
- def get_sample_deployment(self, name, **kwargs):
- return K8SSampleDeployment(self, name, **kwargs)
+ def download_k8s_logs(self, files):
+ """
+ Download JUnit report and conformance logs from cluster
+ :param files:
+ :return:
+ """
+ master_host = self.__config.salt.salt_master_host
+ with self.__underlay.remote(host=master_host) as r:
+ for log_file in files:
+ cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
+ self.controller_name, log_file)
+ r.check_call(cmd, raise_on_err=False)
+ LOG.info("Downloading the artifact {0}".format(log_file))
+ r.download(destination=log_file, target=os.getcwd())
- def is_pod_exists_with_prefix(self, prefix, namespace, phase='Running'):
- for pod in self.api.pods.list(namespace=namespace):
- if pod.name.startswith(prefix) and pod.phase == phase:
- return True
- return False
+ def combine_xunit(self, path, output):
+ """
+ Function to combine multiple xmls with test results to
+ one.
- def get_pod_ips_from_container(self, pod_name, exclude_local=True):
- """ Not all containers have 'ip' binary on-board """
- cmd = "kubectl exec {0} ip a|grep \"inet \"|awk '{{print $2}}'".format(
- pod_name)
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- ips = [line.strip().split('/')[0] for line in result['stdout']]
- if exclude_local:
- ips = [ip for ip in ips if not ip.startswith("127.")]
- return ips
+ :param path: Path where xmls to combine located
+ :param output: Path to xml file where output will stored
+ :return:
+ """
+ with self.__underlay.remote(node_name=self.controller_name) as r:
+ cmd = ("apt-get install python-setuptools -y; "
+ "pip install "
+ "https://github.com/mogaika/xunitmerge/archive/master.zip")
+ LOG.debug('Installing xunitmerge')
+ r.check_call(cmd, raise_on_err=False)
+ LOG.debug('Merging xunit')
+ cmd = ("cd {0}; arg = ''; "
+ "for i in $(ls | grep xml); "
+ "do arg=\"$arg $i\"; done && "
+ "xunitmerge $arg {1}".format(path, output))
+ r.check_call(cmd, raise_on_err=False)
- def create_pod_from_file(self, path, namespace=None):
- with open(path) as f:
- data = f.read()
- return self.check_pod_create(data, namespace=namespace)
+ def manage_cncf_archive(self):
+ """
+ Function to untar archive, move files, that we are needs to the
+ home folder, prepare it to downloading and clean the trash.
+ Will generate files: e2e.log, junit_01.xml, cncf_results.tar.gz
+ and version.txt
+ :return:
+ """
+
+ # Namespace and pod name may be hardcoded since this function is
+ # very specific for cncf and cncf is not going to change
+ # those launch pod name and namespace.
+ get_tar_name_cmd = ("kubectl logs -n sonobuoy sonobuoy | "
+ "grep 'Results available' | "
+ "sed 's/.*\///' | tr -d '\"'")
+
+ with self.__underlay.remote(
+ node_name=self.controller_name) as remote:
+ tar_name = remote.check_call(get_tar_name_cmd)['stdout'][0].strip()
+ untar = "mkdir result && tar -C result -xzf {0}".format(tar_name)
+ remote.check_call(untar, raise_on_err=False)
+ manage_results = ("mv result/plugins/e2e/results/e2e.log . && "
+ "mv result/plugins/e2e/results/junit_01.xml . ;"
+ "kubectl version > version.txt")
+ remote.check_call(manage_results, raise_on_err=False)
+ cleanup_host = "rm -rf result"
+ remote.check_call(cleanup_host, raise_on_err=False)
+ # This one needed to use download fixture, since I don't know
+ # how possible apply fixture arg dynamically from test.
+ rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
+ remote.check_call(rename_tar, raise_on_err=False)
+
+ @retry(300, exception=DevopsCalledProcessError)
+ def nslookup(self, host, src):
+ """ Run nslookup on controller and return result """
+ return self.controller_check_call("nslookup {0} {1}".format(host, src))
+
+ @retry(300, exception=DevopsCalledProcessError)
+ def curl(self, url):
+ """
+ Run curl on controller and return stdout
+
+ :param url: url to curl
+ :return: response string
+ """
+ result = self.controller_check_call("curl -s -S \"{}\"".format(url))
+ LOG.debug("curl \"{0}\" result: {1}".format(url, result['stdout']))
+ return result['stdout']
-class K8SSampleDeployment:
- def __init__(self, manager, name, replicas=2,
- image='gcr.io/google-samples/node-hello:1.0', port=8080):
- self.manager = manager
- self.name = name
- self.image = image
- self.port = port
- self.replicas = replicas
+class K8SKubectlCli(object):
+    """ Contains kubectl cli commands and api wrappers """
+ def __init__(self, manager):
+ self._manager = manager
- def run(self):
- self.manager.kubectl_run(self.name, self.image, self.port,
- replicas=self.replicas)
+ def cli_run(self, namespace, name, image, port, replicas=1):
+ cmd = "kubectl -n {0} run {1} --image={2} --port={3} --replicas={4}".\
+ format(namespace, name, image, port, replicas)
+ return self._manager.controller_check_call(cmd)
- def expose(self, service_type='ClusterIP', target_name=None):
- self.manager.kubectl_expose(
- 'deployment', self.name, self.port, service_type, target_name)
+ def run(self, namespace, name, image, port, replicas=1):
+ self.cli_run(namespace, name, image, port, replicas)
+ return self._manager.api.deployments.get(
+ namespace=namespace, name=name)
- def get_svc_ip(self, external=False):
- return self.manager.get_svc_ip(self.name, namespace='default',
- external=external)
+ def cli_expose(self, namespace, resource_type, resource_name,
+ service_name=None, port='', service_type='ClusterIP'):
+ cmd = "kubectl -n {0} expose {1} {2} --port={3} --type={4}".format(
+ namespace, resource_type, resource_name, port, service_type)
+ if service_name is not None:
+ cmd += " --name={}".format(service_name)
+ return self._manager.controller_check_call(cmd)
- def curl(self, external=False):
- url = "http://{0}:{1}".format(
- self.get_svc_ip(external=external), self.port)
+ def expose(self, resource, service_name=None,
+ port='', service_type='ClusterIP'):
+ self.cli_expose(resource.namespace, resource.resource_type,
+ resource.name, service_name=service_name,
+ port=port, service_type=service_type)
+ return self._manager.api.services.get(
+ namespace=resource.namespace, name=service_name or resource.name)
+
+ def cli_exec(self, namespace, pod_name, cmd, container=''):
+ kubectl_cmd = "kubectl -n {0} exec --container={1} {2} -- {3}".format(
+ namespace, container, pod_name, cmd)
+ return self._manager.controller_check_call(kubectl_cmd)
+
+    # named 'execute' instead of 'exec' because 'exec' is a reserved statement
+ def execute(self, pod, cmd, container=''):
+ return self.cli_exec(pod.namespace, pod.name, cmd, container=container)
+
+ def cli_annotate(self, namespace, resource_type, resource_name,
+ annotations, overwrite=False):
+ cmd = "kubectl -n {0} annotate {1} {2} {3}".format(
+ namespace, resource_type, resource_name, annotations)
+ if overwrite:
+ cmd += " --overwrite"
+ return self._manager.controller_check_call(cmd)
+
+ def annotate(self, resource, annotations, overwrite=False):
+ return self.cli_annotate(resource.namespace, resource.resource_type,
+ resource.name, annotations,
+ overwrite=overwrite)
+
+
+class K8SVirtlet(object):
+    """ Contains virtlet-related methods """
+ def __init__(self, manager, namespace='kube-system'):
+ self._manager = manager
+ self._namespace = namespace
+
+ def get_virtlet_node_pod(self, node_name):
+ for pod in self._manager.api.pods.list(
+ namespace=self._namespace, name_prefix='virtlet-'):
+ if pod.read().spec.node_name == node_name:
+ return pod
+ return None
+
+ def get_pod_dom_uuid(self, pod):
+ uuid_name_map = self.virtlet_execute(
+ pod.read().spec.node_name, 'virsh list --uuid --name')['stdout']
+        LOG.debug("virsh uuid/name map: {}".format(uuid_name_map))
+        LOG.debug("Looking for domain uuid of pod '{}'".format(pod.name))
+ for line in uuid_name_map:
+ if line.rstrip().endswith("-{}".format(pod.name)):
+ return line.split(" ")[0]
+ raise Exception("Cannot detect uuid for pod {}".format(pod.name))
+
+ def virsh_domstats(self, pod):
+ """ get dict of vm stats """
+ uuid = self.get_pod_dom_uuid(pod)
+ result = self.virtlet_execute(
+ pod.read().spec.node_name, 'virsh domstats {}'.format(uuid))
+ stats = dict()
+ for line in result['stdout']:
+ if '=' in line:
+ vals = line.strip().split('=')
+ stats[vals[0]] = vals[1]
+ return stats
+
+ def virtlet_execute(self, node_name, cmd, container='libvirt'):
+ """ execute command inside virtlet container """
+ pod = self.get_virtlet_node_pod(node_name)
+ return self._manager.kubectl.execute(pod, cmd, container)
+
+
+class K8SSampleDeployment(object):
+ """ Wrapper for deployment run=>expose=>check frequent combination """
+ def __init__(self, manager, name,
+ namespace=None,
+ image='gcr.io/google-samples/node-hello:1.0',
+ port=8080,
+ replicas=2):
+ namespace = namespace or manager.api.default_namespace
+
+ self._manager = manager
+ self._port = port
+ self._deployment = \
+ manager.kubectl.run(namespace, name,
+ image=image, port=port, replicas=replicas)
+ self._index = 1 # used to generate svc name
+ self._svc = None # hold last created svc
+
+ def wait_ready(self, timeout=300, interval=5):
+ self._deployment.wait_ready(timeout=timeout, interval=interval)
+ return self
+
+ def svc(self):
+ """ Return the last exposed service"""
+ return self._svc
+
+ def expose(self, service_type='ClusterIP'):
+ service_name = "{0}-s{1}".format(self._deployment.name, self._index)
+ self._svc = self._manager.kubectl.expose(
+ self._deployment, port=self._port,
+ service_name=service_name, service_type=service_type)
+ return self._svc
+
+ def curl(self, svc=None, external=False):
+ if svc is None:
+ svc = self.svc()
+ url = "http://{0}:{1}".format(svc.get_ip(external), self._port)
if external:
return requests.get(url).text
else:
- return self.manager.curl(url)
+ return self._manager.curl(url)
- def is_service_available(self, external=False):
- return "Hello Kubernetes!" in self.curl(external=external)
-
- def wait_for_ready(self):
- return self.manager.wait_deploy_ready(self.name)
+ def is_service_available(self, svc=None, external=False):
+ return "Hello Kubernetes!" in self.curl(svc, external=external)
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index ca2d0a1..8233a9b 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -15,10 +15,10 @@
jira
testrail<=0.3.8
functools32
-python-k8sclient==0.4.0
+kubernetes
salt-pepper<=0.5.3
setuptools<=36.2.0
netaddr
mock>=1.2
python-jenkins
-cmd2<0.9
\ No newline at end of file
+cmd2<0.9
diff --git a/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml b/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
index 1f27c73..7162657 100644
--- a/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
@@ -1,7 +1,7 @@
{% from 'k8s-ha-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'k8s-ha-calico/underlay.yaml' import HOSTNAME_CTL01 with context %}
-# Kubernetes
+# Kubernetes upgrade
- description: Update hypercube image
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:pool' state.sls kubernetes.pool
@@ -9,18 +9,11 @@
retry: {count: 3, delay: 5}
skip_fail: false
-- description: Run whole master to check consistency
+- description: Update the Kubernetes Master nodes and restart the services
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# TODO: sync with PROD-20441
-- desciption: Restart kube-apiserver
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:control' service.restart kube-apiserver
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
skip_fail: false
- desciption: Print kubernetes version
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index 484aec5..ca5c116 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -41,13 +41,10 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.wait_check_network(k8sclient, works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -72,27 +69,19 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_SERVER_PREFIX)
show_step(3)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- pod_name='netchecker-agent',
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_AGENT_PREFIX)
show_step(4)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
show_step(5)
- res = netchecker.get_metric(k8sclient,
- netchecker_pod_port=netchecker_port,
- namespace='netchecker')
+ res = nch.get_metric()
assert res.status_code == 200, 'Unexpected response code {}'\
.format(res)
@@ -111,8 +100,9 @@
metric, res.text)
show_step(6)
- first_node = k8sclient.nodes.list()[0]
- first_node_ips = [addr.address for addr in first_node.status.addresses
+ first_node = k8s_deployed.api.nodes.list()[0]
+ first_node_ips = [addr.address for addr in
+ first_node.read().status.addresses
if 'IP' in addr.type]
assert len(first_node_ips) > 0, "Couldn't find first k8s node IP!"
first_node_names = [name for name in underlay.node_names()
@@ -123,13 +113,13 @@
target_pod_ip = None
- for pod in k8sclient.pods.list(namespace='netchecker'):
- LOG.debug('NC pod IP: {0}'.format(pod.status.pod_ip))
- if pod.status.host_ip not in first_node_ips:
+ for pod in k8s_deployed.api.pods.list(namespace='netchecker'):
+ LOG.debug('NC pod IP: {0}'.format(pod.read().status.pod_ip))
+ if pod.read().status.host_ip not in first_node_ips:
continue
# TODO: get pods by daemonset with name 'netchecker-agent'
if 'netchecker-agent-' in pod.name and 'hostnet' not in pod.name:
- target_pod_ip = pod.status.pod_ip
+ target_pod_ip = pod.read().status.pod_ip
assert target_pod_ip is not None, "Could not find netchecker pod IP!"
@@ -154,9 +144,7 @@
'recovered'.format(target_pod_ip, first_node.name))
show_step(8)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port,
- works=True)
+ nch.wait_check_network(works=True)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -183,38 +171,25 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- kube_master_nodes = k8s_deployed.get_k8s_masters()
+ kube_master_nodes = k8s_deployed.get_masters()
assert kube_master_nodes, "No k8s masters found in pillars!"
- netchecker_port = netchecker.get_service_port(k8sclient)
+
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
show_step(3)
- netchecker.kubernetes_block_traffic_namespace(underlay,
- kube_master_nodes[0],
- 'netchecker')
+ nch.kubernetes_block_traffic_namespace()
show_step(4)
- netchecker.calico_allow_netchecker_connections(underlay, k8sclient,
- kube_master_nodes[0],
- 'netchecker')
+ nch.calico_allow_netchecker_connections()
show_step(5)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=False, timeout=500,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=False)
show_step(6)
- netchecker.kubernetes_allow_traffic_from_agents(underlay,
- kube_master_nodes[0],
- 'netchecker')
+ nch.kubernetes_allow_traffic_from_agents()
show_step(7)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index ec90863..8066cd9 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -47,29 +47,22 @@
11. Optionally run k8s e2e tests
"""
- # STEP #5
- # k8s_actions = k8s_deployed
- sl_actions = stacklight_deployed
+
show_step(5)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ sl_actions = stacklight_deployed
+ nch = netchecker.Netchecker(k8s_deployed.api)
+
show_step(6)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_SERVER_PREFIX)
show_step(7)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- pod_name='netchecker-agent',
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_AGENT_PREFIX)
- # show_step(8)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port)
+ show_step(8)
+ nch.wait_check_network(works=True)
+
show_step(9)
- res = netchecker.get_metric(k8sclient,
- netchecker_pod_port=netchecker_port,
- namespace='netchecker')
+ res = nch.get_metric()
assert res.status_code == 200, 'Unexpected response code {}'\
.format(res)
@@ -144,12 +137,9 @@
7. Optionally run k8s e2e conformance
"""
- k8s_actions = k8s_deployed
- sl_actions = stacklight_deployed
- # STEP #5
+
show_step(5)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ sl_actions = stacklight_deployed
prometheus_client = stacklight_deployed.api
try:
@@ -186,7 +176,7 @@
if config.k8s.k8s_conformance_run:
show_step(7)
- k8s_actions.run_conformance()
+ k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
@pytest.mark.extract(container_system='docker', extract_from='conformance',
@@ -211,8 +201,8 @@
5. Run conformance if need
"""
- k8s_actions = k8s_deployed
+
if config.k8s.k8s_conformance_run:
show_step(5)
- k8s_actions.run_conformance()
+ k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 5216470..ea12129 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -19,6 +19,8 @@
from tcp_tests import logger
from tcp_tests import settings
+from tcp_tests.managers.k8s import read_yaml_file
+
LOG = logger.logger
@@ -38,32 +40,35 @@
3. Expose deployment
4. Annotate service with domain name
5. Try to get service using nslookup
+ 6. Delete service and deployment
"""
+ show_step(1)
if not (config.k8s_deploy.kubernetes_externaldns_enabled and
config.k8s_deploy.kubernetes_coredns_enabled):
- pytest.skip("Test requires Externaldns and coredns addons enabled")
-
- show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ pytest.skip("Test requires externaldns and coredns addons enabled")
show_step(2)
- name = 'test-nginx'
- k8s_deployed.kubectl_run(name, 'nginx', '80')
+ deployment = k8s_deployed.run_sample_deployment('test-dep')
show_step(3)
- k8s_deployed.kubectl_expose('deployment', name, '80', 'ClusterIP')
+ svc = deployment.expose()
- hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
- annotation = "\"external-dns.alpha.kubernetes.io/" \
- "hostname={0}\"".format(hostname)
show_step(4)
- k8s_deployed.kubectl_annotate('service', name, annotation)
+ hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
+ svc.patch({
+ "metadata": {
+ "annotations": {
+ "external-dns.alpha.kubernetes.io/hostname": hostname
+ }
+ }
+ })
show_step(5)
- dns_host = k8s_deployed.get_svc_ip('coredns')
- k8s_deployed.nslookup(hostname, dns_host)
+ k8s_deployed.nslookup(hostname, svc.get_ip())
+
+ show_step(6)
+ deployment.delete()
@pytest.mark.grab_versions
@pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
@@ -97,13 +102,13 @@
7. For every version in update chain:
Update cluster to new version, check test sample service
availability, run conformance
+ 8. Delete service and deployment
"""
show_step(5)
- sample = k8s_deployed.get_sample_deployment('test-dep-chain-upgrade')
- sample.run()
+ sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
sample.expose()
- sample.wait_for_ready()
+ sample.wait_ready()
assert sample.is_service_available()
@@ -114,7 +119,7 @@
chain_versions = config.k8s.k8s_update_chain.split(" ")
for version in chain_versions:
LOG.info("Chain update to '{}' version".format(version))
- k8s_deployed.update_k8s_images(version)
+ k8s_deployed.update_k8s_version(version)
LOG.info("Checking test service availability")
assert sample.is_service_available()
@@ -123,6 +128,11 @@
log_name = "k8s_conformance_{}.log".format(version)
k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
+ assert sample.is_service_available()
+
+ show_step(8)
+ sample.delete()
+
@pytest.mark.grap_versions
@pytest.mark.fail_snapshot
def test_k8s_metallb(self, show_step, config, k8s_deployed):
@@ -136,6 +146,7 @@
5. Check services availability from outside of cluster
6. Run conformance
7. Check services availability from outside of cluster
+ 8. Delete deployments
"""
show_step(1)
if not config.k8s_deploy.kubernetes_metallb_enabled:
@@ -143,25 +154,25 @@
show_step(2)
ns = "metallb-system"
- assert k8s_deployed.is_pod_exists_with_prefix("controller", ns)
- assert k8s_deployed.is_pod_exists_with_prefix("speaker", ns)
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
show_step(3)
samples = []
for i in range(5):
name = 'test-dep-metallb-{}'.format(i)
- sample = k8s_deployed.get_sample_deployment(name)
- sample.run()
- samples.append(sample)
+ samples.append(k8s_deployed.run_sample_deployment(name))
show_step(4)
for sample in samples:
sample.expose('LoadBalancer')
- for sample in samples:
- sample.wait_for_ready()
+ sample.wait_ready()
show_step(5)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
show_step(6)
@@ -169,11 +180,17 @@
show_step(7)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
+ show_step(8)
+ for sample in samples:
+ sample.delete()
+
@pytest.mark.grap_versions
@pytest.mark.fail_snapshot
- def test_k8s_genie_flannel(self, show_step, salt_deployed, k8s_deployed):
+ def test_k8s_genie_flannel(self, show_step, config,
+ salt_deployed, k8s_deployed):
"""Test genie-cni+flannel cni setup
Scenario:
@@ -191,6 +208,7 @@
11. Check pods availability
12. Run conformance
13. Check pods availability
+ 14. Delete pods
"""
show_step(1)
@@ -213,13 +231,14 @@
LOG.info("Calico network: {}".format(calico_network))
show_step(2)
- assert k8s_deployed.is_pod_exists_with_prefix("kube-flannel-",
- "kube-system")
+ assert len(k8s_deployed.api.pods.list(
+ namespace="kube-system", name_prefix="kube-flannel-")) > 0
- data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
show_step(3)
- flannel_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-flannel.yaml'))
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ flannel_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-flannel.yaml'))
+ flannel_pod.wait_running()
show_step(4)
flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
@@ -227,8 +246,9 @@
assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
show_step(5)
- calico_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod.wait_running()
show_step(6)
calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
@@ -236,8 +256,9 @@
assert netaddr.IPAddress(calico_ips[0]) in calico_network
show_step(7)
- multicni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod.wait_running()
show_step(8)
multicni_ips = \
@@ -248,8 +269,9 @@
netaddr.IPAddress(multicni_ips[1]) in net
show_step(9)
- nocni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample.yaml'))
+ nocni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample.yaml'))
+ nocni_pod.wait_running()
show_step(10)
nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
@@ -277,3 +299,9 @@
show_step(13)
check_pods_availability()
+
+ show_step(14)
+ flannel_pod.delete()
+ calico_pod.delete()
+ multicni_pod.delete()
+ nocni_pod.delete()
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 1cf4bee..83fd33a 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import pytest
+import os
+from tcp_tests.managers.k8s import read_yaml_file
from tcp_tests import logger
LOG = logger.logger
@@ -35,16 +37,17 @@
if not config.k8s_deploy.kubernetes_virtlet_enabled:
pytest.skip("Test requires Virtlet addon enabled")
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
show_step(1)
- vm_name = k8s_deployed.run_vm()
+ vm_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'cirros-vm.yaml'))
+
show_step(2)
- k8s_deployed.wait_active_state(vm_name, timeout=360)
+ vm_pod.wait_running(timeout=600)
+
show_step(3)
- k8s_deployed.delete_vm(vm_name)
+ vm_pod.delete()
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@@ -61,33 +64,27 @@
if not config.k8s_deploy.kubernetes_virtlet_enabled:
pytest.skip("Test requires Virtlet addon enabled")
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ cpu = 2
+ memory_mb = 512
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
show_step(1)
- target_cpu = 2 # Cores
- target_memory = 256 # Size in MB
- target_memory_kb = target_memory * 1024
- target_yaml = 'virtlet/examples/cirros-vm-exp.yaml'
- k8s_deployed.adjust_cirros_resources(cpu=target_cpu,
- memory=target_memory,
- target_yaml=target_yaml)
+ pod_body = read_yaml_file(data_dir, 'cirros-vm.yaml')
+ pod_body['metadata']['annotations']['VirtletVCPUCount'] = str(cpu)
+ pod_body['spec']['containers'][0]['resources']['limits']['memory'] = \
+ '{}Mi'.format(memory_mb)
+
show_step(2)
- vm_name = k8s_deployed.run_vm(target_yaml)
- k8s_deployed.wait_active_state(vm_name, timeout=360)
+ vm_pod = k8s_deployed.api.pods.create(body=pod_body)
+ vm_pod.wait_running(timeout=600)
+
show_step(3)
- domain_name = k8s_deployed.get_domain_name(vm_name)
- cpu = k8s_deployed.get_vm_cpu_count(domain_name)
- mem = k8s_deployed.get_vm_memory_count(domain_name)
- fail_msg = '{0} is not correct memory unit for VM. Correct is {1}'.\
- format(mem, target_memory_kb)
- assert target_memory_kb == mem, fail_msg
- fail_msg = '{0} is not correct cpu cores count for VM. ' \
- 'Correct is {1}'.format(cpu, target_cpu)
- assert target_cpu == cpu, fail_msg
+ stats = k8s_deployed.virtlet.virsh_domstats(vm_pod)
+ assert int(stats['vcpu.current']) == cpu
+ assert int(stats['balloon.maximum'])/1024 == memory_mb
+
show_step(4)
- k8s_deployed.delete_vm(target_yaml)
+ vm_pod.delete()
@pytest.mark.grab_versions
@pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
@@ -104,30 +101,3 @@
show_step(1)
k8s_deployed.run_virtlet_conformance()
-
- @pytest.mark.skip(reason="No configuration with ceph and k8s")
- def test_rbd_flexvolume_driver(self, show_step, config, k8s_deployed):
- """Test for deploying a VM with Ceph RBD volume using flexvolumeDriver
-
- Scenario:
- 1. Start VM with prepared yaml from run-ceph.sh scripts
- 2. Check that RBD volume is listed in virsh domblklist for VM
- 3. Destroy VM
-
- """
- # From:
- # https://github.com/Mirantis/virtlet/blob/master/tests/e2e/run_ceph.sh
- if not config.k8s_deploy.kubernetes_virtlet_enabled:
- pytest.skip("Test requires Virtlet addon enabled")
-
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
-
- target_yaml = "virtlet/tests/e2e/cirros-vm-rbd-volume.yaml"
- vm_name = k8s_deployed.run_vm(target_yaml)
- k8s_deployed.wait_active_state(vm_name)
- domain_name = k8s_deployed.get_domain_name(vm_name)
- vm_volumes_list = k8s_deployed.list_vm_volumes(domain_name)
- assert 'rbd' in vm_volumes_list
- k8s_deployed.delete_vm(target_yaml)
diff --git a/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml b/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml
new file mode 100644
index 0000000..5cac75b
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml
@@ -0,0 +1,41 @@
+# From virtlet/examples/cirros-vm.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cirros-vm
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+ VirtletDiskDriver: virtio
+ # inject ssh keys via cloud-init
+ VirtletSSHKeys: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: cirros-vm
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/cirros
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for `kubectl attach -t` to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi