Refactoring of k8s manager and tests
Changes:
- Switch to the official Kubernetes Python client library
- Rewrite the k8s API wrapper in an OOP manner (usage sketch below)
- Use the API where possible instead of the CLI
- Remove virtlet code, since it can be replaced with the pod API
- Remove unused/outdated manager code
- Remove bug workaround in k8s upgrade template
- Remove obsolete netchecker code
- Remove unfinished test_rbd_flexvolume_driver
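
A minimal sketch of the new OOP-style usage the updated tests rely on (the
names Netchecker, run_sample_deployment, api.pods.create, wait_running and
read_yaml_file are taken from the hunks below; the pytest fixture wiring and
the netchecker module import path are assumptions based on the existing test
modules):

    import os

    from tcp_tests.managers.k8s import read_yaml_file

    def test_example(k8s_deployed):
        # netchecker is now a thin wrapper object around the k8s API client
        nch = netchecker.Netchecker(k8s_deployed.api)
        nch.wait_netchecker_pods_running('netchecker-agent')
        nch.wait_check_network(works=True)

        # sample deployments replace the old kubectl_run/kubectl_expose helpers
        sample = k8s_deployed.run_sample_deployment('test-dep')
        sample.expose()
        sample.wait_ready()
        sample.delete()

        # pods are created through the API instead of virtlet-specific CLI code
        data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
        vm_pod = k8s_deployed.api.pods.create(
            body=read_yaml_file(data_dir, 'cirros-vm.yaml'))
        vm_pod.wait_running(timeout=600)
        vm_pod.delete()
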
Change-Id: I446a240123282196a6ba54f588aea84791f175ba
Related-PROD: PROD-21700
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index 484aec5..ca5c116 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -41,13 +41,10 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.wait_check_network(k8sclient, works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -72,27 +69,19 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- namespace='netchecker')
+ nch.wait_netchecker_pods_running('netchecker-server')
show_step(3)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- pod_name='netchecker-agent',
- namespace='netchecker')
+ nch.wait_netchecker_pods_running('netchecker-agent')
show_step(4)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
show_step(5)
- res = netchecker.get_metric(k8sclient,
- netchecker_pod_port=netchecker_port,
- namespace='netchecker')
+ res = nch.get_metric()
assert res.status_code == 200, 'Unexpected response code {}'\
.format(res)
@@ -111,8 +100,9 @@
metric, res.text)
show_step(6)
- first_node = k8sclient.nodes.list()[0]
- first_node_ips = [addr.address for addr in first_node.status.addresses
+ first_node = k8s_deployed.api.nodes.list()[0]
+ first_node_ips = [addr.address for addr in
+ first_node.read().status.addresses
if 'IP' in addr.type]
assert len(first_node_ips) > 0, "Couldn't find first k8s node IP!"
first_node_names = [name for name in underlay.node_names()
@@ -123,13 +113,13 @@
target_pod_ip = None
- for pod in k8sclient.pods.list(namespace='netchecker'):
- LOG.debug('NC pod IP: {0}'.format(pod.status.pod_ip))
- if pod.status.host_ip not in first_node_ips:
+ for pod in k8s_deployed.api.pods.list(namespace='netchecker'):
+ LOG.debug('NC pod IP: {0}'.format(pod.read().status.pod_ip))
+ if pod.read().status.host_ip not in first_node_ips:
continue
# TODO: get pods by daemonset with name 'netchecker-agent'
if 'netchecker-agent-' in pod.name and 'hostnet' not in pod.name:
- target_pod_ip = pod.status.pod_ip
+ target_pod_ip = pod.read().status.pod_ip
assert target_pod_ip is not None, "Could not find netchecker pod IP!"
@@ -154,9 +144,7 @@
'recovered'.format(target_pod_ip, first_node.name))
show_step(8)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port,
- works=True)
+ nch.wait_check_network(works=True)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -183,38 +171,25 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- kube_master_nodes = k8s_deployed.get_k8s_masters()
+ kube_master_nodes = k8s_deployed.get_masters()
assert kube_master_nodes, "No k8s masters found in pillars!"
- netchecker_port = netchecker.get_service_port(k8sclient)
+
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
show_step(3)
- netchecker.kubernetes_block_traffic_namespace(underlay,
- kube_master_nodes[0],
- 'netchecker')
+ nch.kubernetes_block_traffic_namespace()
show_step(4)
- netchecker.calico_allow_netchecker_connections(underlay, k8sclient,
- kube_master_nodes[0],
- 'netchecker')
+ nch.calico_allow_netchecker_connections()
show_step(5)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=False, timeout=500,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=False)
show_step(6)
- netchecker.kubernetes_allow_traffic_from_agents(underlay,
- kube_master_nodes[0],
- 'netchecker')
+ nch.kubernetes_allow_traffic_from_agents()
show_step(7)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index ec90863..8066cd9 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -47,29 +47,22 @@
11. Optionally run k8s e2e tests
"""
- # STEP #5
- # k8s_actions = k8s_deployed
- sl_actions = stacklight_deployed
+
show_step(5)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ sl_actions = stacklight_deployed
+ nch = netchecker.Netchecker(k8s_deployed.api)
+
show_step(6)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_SERVER_PREFIX)
show_step(7)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- pod_name='netchecker-agent',
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_AGENT_PREFIX)
- # show_step(8)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port)
+ show_step(8)
+ nch.wait_check_network(works=True)
+
show_step(9)
- res = netchecker.get_metric(k8sclient,
- netchecker_pod_port=netchecker_port,
- namespace='netchecker')
+ res = nch.get_metric()
assert res.status_code == 200, 'Unexpected response code {}'\
.format(res)
@@ -144,12 +137,9 @@
7. Optionally run k8s e2e conformance
"""
- k8s_actions = k8s_deployed
- sl_actions = stacklight_deployed
- # STEP #5
+
show_step(5)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ sl_actions = stacklight_deployed
prometheus_client = stacklight_deployed.api
try:
@@ -186,7 +176,7 @@
if config.k8s.k8s_conformance_run:
show_step(7)
- k8s_actions.run_conformance()
+ k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
@pytest.mark.extract(container_system='docker', extract_from='conformance',
@@ -211,8 +201,8 @@
5. Run conformance if need
"""
- k8s_actions = k8s_deployed
+
if config.k8s.k8s_conformance_run:
show_step(5)
- k8s_actions.run_conformance()
+ k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 5216470..ea12129 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -19,6 +19,8 @@
from tcp_tests import logger
from tcp_tests import settings
+from tcp_tests.managers.k8s import read_yaml_file
+
LOG = logger.logger
@@ -38,32 +40,35 @@
3. Expose deployment
4. Annotate service with domain name
5. Try to get service using nslookup
+ 6. Delete service and deployment
"""
+ show_step(1)
if not (config.k8s_deploy.kubernetes_externaldns_enabled and
config.k8s_deploy.kubernetes_coredns_enabled):
- pytest.skip("Test requires Externaldns and coredns addons enabled")
-
- show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ pytest.skip("Test requires externaldns and coredns addons enabled")
show_step(2)
- name = 'test-nginx'
- k8s_deployed.kubectl_run(name, 'nginx', '80')
+ deployment = k8s_deployed.run_sample_deployment('test-dep')
show_step(3)
- k8s_deployed.kubectl_expose('deployment', name, '80', 'ClusterIP')
+ svc = deployment.expose()
- hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
- annotation = "\"external-dns.alpha.kubernetes.io/" \
- "hostname={0}\"".format(hostname)
show_step(4)
- k8s_deployed.kubectl_annotate('service', name, annotation)
+ hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
+ svc.patch({
+ "metadata": {
+ "annotations": {
+ "external-dns.alpha.kubernetes.io/hostname": hostname
+ }
+ }
+ })
show_step(5)
- dns_host = k8s_deployed.get_svc_ip('coredns')
- k8s_deployed.nslookup(hostname, dns_host)
+ k8s_deployed.nslookup(hostname, svc.get_ip())
+
+ show_step(6)
+ deployment.delete()
@pytest.mark.grab_versions
@pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
@@ -97,13 +102,13 @@
7. For every version in update chain:
Update cluster to new version, check test sample service
availability, run conformance
+ 8. Delete service and deployment
"""
show_step(5)
- sample = k8s_deployed.get_sample_deployment('test-dep-chain-upgrade')
- sample.run()
+ sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
sample.expose()
- sample.wait_for_ready()
+ sample.wait_ready()
assert sample.is_service_available()
@@ -114,7 +119,7 @@
chain_versions = config.k8s.k8s_update_chain.split(" ")
for version in chain_versions:
LOG.info("Chain update to '{}' version".format(version))
- k8s_deployed.update_k8s_images(version)
+ k8s_deployed.update_k8s_version(version)
LOG.info("Checking test service availability")
assert sample.is_service_available()
@@ -123,6 +128,11 @@
log_name = "k8s_conformance_{}.log".format(version)
k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
+ assert sample.is_service_available()
+
+ show_step(8)
+ sample.delete()
+
@pytest.mark.grap_versions
@pytest.mark.fail_snapshot
def test_k8s_metallb(self, show_step, config, k8s_deployed):
@@ -136,6 +146,7 @@
5. Check services availability from outside of cluster
6. Run conformance
7. Check services availability from outside of cluster
+ 8. Delete deployments
"""
show_step(1)
if not config.k8s_deploy.kubernetes_metallb_enabled:
@@ -143,25 +154,25 @@
show_step(2)
ns = "metallb-system"
- assert k8s_deployed.is_pod_exists_with_prefix("controller", ns)
- assert k8s_deployed.is_pod_exists_with_prefix("speaker", ns)
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
show_step(3)
samples = []
for i in range(5):
name = 'test-dep-metallb-{}'.format(i)
- sample = k8s_deployed.get_sample_deployment(name)
- sample.run()
- samples.append(sample)
+ samples.append(k8s_deployed.run_sample_deployment(name))
show_step(4)
for sample in samples:
sample.expose('LoadBalancer')
- for sample in samples:
- sample.wait_for_ready()
+ sample.wait_ready()
show_step(5)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
show_step(6)
@@ -169,11 +180,17 @@
show_step(7)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
+ show_step(8)
+ for sample in samples:
+ sample.delete()
+
@pytest.mark.grap_versions
@pytest.mark.fail_snapshot
- def test_k8s_genie_flannel(self, show_step, salt_deployed, k8s_deployed):
+ def test_k8s_genie_flannel(self, show_step, config,
+ salt_deployed, k8s_deployed):
"""Test genie-cni+flannel cni setup
Scenario:
@@ -191,6 +208,7 @@
11. Check pods availability
12. Run conformance
13. Check pods availability
+ 14. Delete pods
"""
show_step(1)
@@ -213,13 +231,14 @@
LOG.info("Calico network: {}".format(calico_network))
show_step(2)
- assert k8s_deployed.is_pod_exists_with_prefix("kube-flannel-",
- "kube-system")
+        assert len(k8s_deployed.api.pods.list(
+            namespace="kube-system", name_prefix="kube-flannel-")) > 0
- data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
show_step(3)
- flannel_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-flannel.yaml'))
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ flannel_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-flannel.yaml'))
+ flannel_pod.wait_running()
show_step(4)
flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
@@ -227,8 +246,9 @@
assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
show_step(5)
- calico_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod.wait_running()
show_step(6)
calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
@@ -236,8 +256,9 @@
assert netaddr.IPAddress(calico_ips[0]) in calico_network
show_step(7)
- multicni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod.wait_running()
show_step(8)
multicni_ips = \
@@ -248,8 +269,9 @@
netaddr.IPAddress(multicni_ips[1]) in net
show_step(9)
- nocni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample.yaml'))
+ nocni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample.yaml'))
+ nocni_pod.wait_running()
show_step(10)
nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
@@ -277,3 +299,9 @@
show_step(13)
check_pods_availability()
+
+ show_step(14)
+ flannel_pod.delete()
+ calico_pod.delete()
+ multicni_pod.delete()
+ nocni_pod.delete()
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 1cf4bee..83fd33a 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import pytest
+import os
+from tcp_tests.managers.k8s import read_yaml_file
from tcp_tests import logger
LOG = logger.logger
@@ -35,16 +37,17 @@
if not config.k8s_deploy.kubernetes_virtlet_enabled:
pytest.skip("Test requires Virtlet addon enabled")
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
show_step(1)
- vm_name = k8s_deployed.run_vm()
+ vm_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'cirros-vm.yaml'))
+
show_step(2)
- k8s_deployed.wait_active_state(vm_name, timeout=360)
+ vm_pod.wait_running(timeout=600)
+
show_step(3)
- k8s_deployed.delete_vm(vm_name)
+ vm_pod.delete()
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@@ -61,33 +64,27 @@
if not config.k8s_deploy.kubernetes_virtlet_enabled:
pytest.skip("Test requires Virtlet addon enabled")
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ cpu = 2
+ memory_mb = 512
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
show_step(1)
- target_cpu = 2 # Cores
- target_memory = 256 # Size in MB
- target_memory_kb = target_memory * 1024
- target_yaml = 'virtlet/examples/cirros-vm-exp.yaml'
- k8s_deployed.adjust_cirros_resources(cpu=target_cpu,
- memory=target_memory,
- target_yaml=target_yaml)
+ pod_body = read_yaml_file(data_dir, 'cirros-vm.yaml')
+ pod_body['metadata']['annotations']['VirtletVCPUCount'] = str(cpu)
+ pod_body['spec']['containers'][0]['resources']['limits']['memory'] = \
+ '{}Mi'.format(memory_mb)
+
show_step(2)
- vm_name = k8s_deployed.run_vm(target_yaml)
- k8s_deployed.wait_active_state(vm_name, timeout=360)
+ vm_pod = k8s_deployed.api.pods.create(body=pod_body)
+ vm_pod.wait_running(timeout=600)
+
show_step(3)
- domain_name = k8s_deployed.get_domain_name(vm_name)
- cpu = k8s_deployed.get_vm_cpu_count(domain_name)
- mem = k8s_deployed.get_vm_memory_count(domain_name)
- fail_msg = '{0} is not correct memory unit for VM. Correct is {1}'.\
- format(mem, target_memory_kb)
- assert target_memory_kb == mem, fail_msg
- fail_msg = '{0} is not correct cpu cores count for VM. ' \
- 'Correct is {1}'.format(cpu, target_cpu)
- assert target_cpu == cpu, fail_msg
+ stats = k8s_deployed.virtlet.virsh_domstats(vm_pod)
+ assert int(stats['vcpu.current']) == cpu
+ assert int(stats['balloon.maximum'])/1024 == memory_mb
+
show_step(4)
- k8s_deployed.delete_vm(target_yaml)
+ vm_pod.delete()
@pytest.mark.grab_versions
@pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
@@ -104,30 +101,3 @@
show_step(1)
k8s_deployed.run_virtlet_conformance()
-
- @pytest.mark.skip(reason="No configuration with ceph and k8s")
- def test_rbd_flexvolume_driver(self, show_step, config, k8s_deployed):
- """Test for deploying a VM with Ceph RBD volume using flexvolumeDriver
-
- Scenario:
- 1. Start VM with prepared yaml from run-ceph.sh scripts
- 2. Check that RBD volume is listed in virsh domblklist for VM
- 3. Destroy VM
-
- """
- # From:
- # https://github.com/Mirantis/virtlet/blob/master/tests/e2e/run_ceph.sh
- if not config.k8s_deploy.kubernetes_virtlet_enabled:
- pytest.skip("Test requires Virtlet addon enabled")
-
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
-
- target_yaml = "virtlet/tests/e2e/cirros-vm-rbd-volume.yaml"
- vm_name = k8s_deployed.run_vm(target_yaml)
- k8s_deployed.wait_active_state(vm_name)
- domain_name = k8s_deployed.get_domain_name(vm_name)
- vm_volumes_list = k8s_deployed.list_vm_volumes(domain_name)
- assert 'rbd' in vm_volumes_list
- k8s_deployed.delete_vm(target_yaml)
diff --git a/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml b/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml
new file mode 100644
index 0000000..5cac75b
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml
@@ -0,0 +1,41 @@
+# From virtlet/examples/cirros-vm.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cirros-vm
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+ VirtletDiskDriver: virtio
+ # inject ssh keys via cloud-init
+ VirtletSSHKeys: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: cirros-vm
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/cirros
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for `kubectl attach -t` to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi