Refactoring of k8s manager and tests
Changes:
- Switch to the official kubernetes python client library
- Rewrite the k8s API wrapper in an OOP manner (usage sketch below)
- Use the API instead of the CLI where possible
- Remove virtlet code, since it can be replaced with the pod API
- Remove unused/outdated manager code
- Remove bug workaround in k8s upgrade template
- Remove obsolete netchecker code
- Remove unfinished test_rbd_flexvolume_driver
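Minimal usage sketch of the reworked OOP wrapper, assembled only from calls
visible in the tests below; `k8s_deployed` is the existing test fixture and
'sample-dep' is a placeholder name, not part of this change:

    import os
    from tcp_tests.managers.k8s import read_yaml_file

    # Deployments and services are objects instead of kubectl invocations
    deployment = k8s_deployed.run_sample_deployment('sample-dep')
    deployment.wait_ready()
    svc = deployment.expose()
    assert deployment.is_service_available()

    # Raw resources go through the wrapped python client
    data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
    pod = k8s_deployed.api.pods.create(
        body=read_yaml_file(data_dir, 'pod-sample.yaml'))
    pod.wait_running()

    pod.delete()
    deployment.delete()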
Change-Id: I446a240123282196a6ba54f588aea84791f175ba
Related-PROD: PROD-21700
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 5216470..ea12129 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -19,6 +19,8 @@
from tcp_tests import logger
from tcp_tests import settings
+from tcp_tests.managers.k8s import read_yaml_file
+
LOG = logger.logger
@@ -38,32 +40,35 @@
3. Expose deployment
4. Annotate service with domain name
5. Try to get service using nslookup
+ 6. Delete service and deployment
"""
+ show_step(1)
if not (config.k8s_deploy.kubernetes_externaldns_enabled and
config.k8s_deploy.kubernetes_coredns_enabled):
- pytest.skip("Test requires Externaldns and coredns addons enabled")
-
- show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ pytest.skip("Test requires externaldns and coredns addons enabled")
show_step(2)
- name = 'test-nginx'
- k8s_deployed.kubectl_run(name, 'nginx', '80')
+ deployment = k8s_deployed.run_sample_deployment('test-dep')
show_step(3)
- k8s_deployed.kubectl_expose('deployment', name, '80', 'ClusterIP')
+ svc = deployment.expose()
- hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
- annotation = "\"external-dns.alpha.kubernetes.io/" \
- "hostname={0}\"".format(hostname)
show_step(4)
- k8s_deployed.kubectl_annotate('service', name, annotation)
+ hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
+ svc.patch({
+ "metadata": {
+ "annotations": {
+ "external-dns.alpha.kubernetes.io/hostname": hostname
+ }
+ }
+ })
show_step(5)
- dns_host = k8s_deployed.get_svc_ip('coredns')
- k8s_deployed.nslookup(hostname, dns_host)
+ k8s_deployed.nslookup(hostname, svc.get_ip())
+
+ show_step(6)
+ deployment.delete()
@pytest.mark.grab_versions
@pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
@@ -97,13 +102,13 @@
7. For every version in update chain:
Update cluster to new version, check test sample service
availability, run conformance
+ 8. Delete service and deployment
"""
show_step(5)
- sample = k8s_deployed.get_sample_deployment('test-dep-chain-upgrade')
- sample.run()
+ sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
sample.expose()
- sample.wait_for_ready()
+ sample.wait_ready()
assert sample.is_service_available()
@@ -114,7 +119,7 @@
chain_versions = config.k8s.k8s_update_chain.split(" ")
for version in chain_versions:
LOG.info("Chain update to '{}' version".format(version))
- k8s_deployed.update_k8s_images(version)
+ k8s_deployed.update_k8s_version(version)
LOG.info("Checking test service availability")
assert sample.is_service_available()
@@ -123,6 +128,11 @@
log_name = "k8s_conformance_{}.log".format(version)
k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
+ assert sample.is_service_available()
+
+ show_step(8)
+ sample.delete()
+
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_k8s_metallb(self, show_step, config, k8s_deployed):
@@ -136,6 +146,7 @@
5. Check services availability from outside of cluster
6. Run conformance
7. Check services availability from outside of cluster
+ 8. Delete deployments
"""
show_step(1)
if not config.k8s_deploy.kubernetes_metallb_enabled:
@@ -143,25 +154,25 @@
show_step(2)
ns = "metallb-system"
- assert k8s_deployed.is_pod_exists_with_prefix("controller", ns)
- assert k8s_deployed.is_pod_exists_with_prefix("speaker", ns)
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
show_step(3)
samples = []
for i in range(5):
name = 'test-dep-metallb-{}'.format(i)
- sample = k8s_deployed.get_sample_deployment(name)
- sample.run()
- samples.append(sample)
+ samples.append(k8s_deployed.run_sample_deployment(name))
show_step(4)
for sample in samples:
sample.expose('LoadBalancer')
- for sample in samples:
- sample.wait_for_ready()
+ sample.wait_ready()
show_step(5)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
show_step(6)
@@ -169,11 +180,17 @@
show_step(7)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
+ show_step(8)
+ for sample in samples:
+ sample.delete()
+
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- def test_k8s_genie_flannel(self, show_step, salt_deployed, k8s_deployed):
+ def test_k8s_genie_flannel(self, show_step, config,
+ salt_deployed, k8s_deployed):
"""Test genie-cni+flannel cni setup
Scenario:
@@ -191,6 +208,7 @@
11. Check pods availability
12. Run conformance
13. Check pods availability
+ 14. Delete pods
"""
show_step(1)
@@ -213,13 +231,14 @@
LOG.info("Calico network: {}".format(calico_network))
show_step(2)
- assert k8s_deployed.is_pod_exists_with_prefix("kube-flannel-",
- "kube-system")
+ assert len(k8s_deployed.api.pods.list(
+ namespace="kube-system", name_prefix="kube-flannel-")) > 0
- data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
show_step(3)
- flannel_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-flannel.yaml'))
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ flannel_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-flannel.yaml'))
+ flannel_pod.wait_running()
show_step(4)
flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
@@ -227,8 +246,9 @@
assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
show_step(5)
- calico_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod.wait_running()
show_step(6)
calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
@@ -236,8 +256,9 @@
assert netaddr.IPAddress(calico_ips[0]) in calico_network
show_step(7)
- multicni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod.wait_running()
show_step(8)
multicni_ips = \
@@ -248,8 +269,9 @@
netaddr.IPAddress(multicni_ips[1]) in net
show_step(9)
- nocni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample.yaml'))
+ nocni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample.yaml'))
+ nocni_pod.wait_running()
show_step(10)
nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
@@ -277,3 +299,9 @@
show_step(13)
check_pods_availability()
+
+ show_step(14)
+ flannel_pod.delete()
+ calico_pod.delete()
+ multicni_pod.delete()
+ nocni_pod.delete()