Add Calico tests (copied from mcp-qa)
Change-Id: I10219fc78c8759919c631baa9e9f95baf631c1dc
Reviewed-on: https://review.gerrithub.io/365463
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index bec38a5..3cacbaf 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -72,4 +72,10 @@
k8s_actions.install(commands)
hardware.create_snapshot(ext.SNAPSHOT.k8s_deployed)
- return k8s_actions
\ No newline at end of file
+ # Workaround for keepalived hang issue after env revert from snapshot
+ # see https://mirantis.jira.com/browse/PROD-12038
+ LOG.warning('Restarting keepalived service on controllers...')
+ k8s_actions._salt.local(tgt='ctl*', fun='cmd.run',
+ args='systemctl restart keepalived.service')
+
+ return k8s_actions
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
new file mode 100644
index 0000000..f23a5c8
--- /dev/null
+++ b/tcp_tests/helpers/netchecker.py
@@ -0,0 +1,520 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import requests
+
+from devops.helpers import helpers
+from k8sclient.client import rest
+
+from tcp_tests import logger
+from tcp_tests.helpers import utils
+
+
+LOG = logger.logger
+
+
+NETCHECKER_SERVICE_NAME = "netchecker-service"
+NETCHECKER_CONTAINER_PORT = NETCHECKER_SERVICE_PORT = 8081
+NETCHECKER_NODE_PORT = 31081
+NETCHECKER_REPORT_INTERVAL = 30
+NETCHECKER_SERVER_REPLICAS = 1
+NETCHECKER_PROBEURLS = "http://ipinfo.io"
+
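+# The netchecker setup used below: a netchecker-server Deployment exposed
+# through a NodePort Service, plus two netchecker-agent DaemonSets (one in
+# the pod network and one with hostNetwork) that report connectivity to the
+# server.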
+NETCHECKER_SVC_CFG = {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "name": NETCHECKER_SERVICE_NAME
+ },
+ "spec": {
+ "ports": [
+ {
+ "nodePort": NETCHECKER_NODE_PORT,
+ "port": NETCHECKER_SERVICE_PORT,
+ "protocol": "TCP",
+ "targetPort": NETCHECKER_CONTAINER_PORT
+ }
+ ],
+ "selector": {
+ "app": "netchecker-server"
+ },
+ "type": "NodePort"
+ }
+}
+
+NETCHECKER_DEPLOYMENT_CFG = {
+ "kind": "Deployment",
+ "spec": {
+ "template": {
+ "spec": {
+ "containers": [
+ {
+ "name": "netchecker-server",
+ "env": None,
+ "imagePullPolicy": "IfNotPresent",
+ "image": "mirantis/k8s-netchecker-server:latest",
+ "args": [
+ "-v=5",
+ "-logtostderr",
+ "-kubeproxyinit",
+ "-endpoint=0.0.0.0:{0}".format(
+ NETCHECKER_CONTAINER_PORT)
+ ],
+ "ports": [
+ {
+ "containerPort": NETCHECKER_CONTAINER_PORT,
+ "hostPort": NETCHECKER_NODE_PORT
+ }
+ ]
+ }
+ ]
+ },
+ "metadata": {
+ "labels": {
+ "app": "netchecker-server"
+ },
+ "name": "netchecker-server"
+ }
+ },
+ "replicas": NETCHECKER_SERVER_REPLICAS
+ },
+ "apiVersion": "extensions/v1beta1",
+ "metadata": {
+ "name": "netchecker-server"
+ }
+}
+
+NETCHECKER_DS_CFG = [
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "DaemonSet",
+ "metadata": {
+ "labels": {
+ "app": "netchecker-agent"
+ },
+ "name": "netchecker-agent"
+ },
+ "spec": {
+ "template": {
+ "metadata": {
+ "labels": {
+ "app": "netchecker-agent"
+ },
+ "name": "netchecker-agent"
+ },
+ "spec": {
+ "tolerations": [
+ {
+ "key": "node-role.kubernetes.io/master",
+ "effect": "NoSchedule"
+ }
+ ],
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "MY_POD_NAME",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.name"
+ }
+ }
+ },
+ {
+ "name": "MY_NODE_NAME",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "spec.nodeName"
+ }
+ }
+ },
+ {
+ "name": "REPORT_INTERVAL",
+ "value": str(NETCHECKER_REPORT_INTERVAL)
+ },
+ ],
+ "image": "mirantis/k8s-netchecker-agent:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "netchecker-agent",
+ "command": ["netchecker-agent"],
+ "args": [
+ "-v=5",
+ "-logtostderr",
+ "-probeurls={0}".format(NETCHECKER_PROBEURLS)
+ ]
+ }
+ ],
+ }
+ },
+ "updateStrategy": {
+ "type": "RollingUpdate"
+ }
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "DaemonSet",
+ "metadata": {
+ "labels": {
+ "app": "netchecker-agent-hostnet"
+ },
+ "name": "netchecker-agent-hostnet"
+ },
+ "spec": {
+ "template": {
+ "metadata": {
+ "labels": {
+ "app": "netchecker-agent-hostnet"
+ },
+ "name": "netchecker-agent-hostnet"
+ },
+ "spec": {
+ "tolerations": [
+ {
+ "key": "node-role.kubernetes.io/master",
+ "effect": "NoSchedule"
+ }
+ ],
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "MY_POD_NAME",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.name"
+ }
+ }
+ },
+ {
+ "name": "MY_NODE_NAME",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "spec.nodeName"
+ }
+ }
+ },
+ {
+ "name": "REPORT_INTERVAL",
+ "value": str(NETCHECKER_REPORT_INTERVAL)
+ },
+ ],
+ "image": "mirantis/k8s-netchecker-agent:latest",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "netchecker-agent",
+ "command": ["netchecker-agent"],
+ "args": [
+ "-v=5",
+ "-logtostderr",
+ "-probeurls={0}".format(NETCHECKER_PROBEURLS)
+ ]
+ }
+ ],
+ "hostNetwork": True,
+ "dnsPolicy": "ClusterFirstWithHostNet",
+ "updateStrategy": {
+ "type": "RollingUpdate"
+ }
+ }
+ },
+ "updateStrategy": {
+ "type": "RollingUpdate"
+ }
+ }
+ }
+]
+
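+# Calico policy that accepts all ingress traffic except TCP connections to
+# the netchecker service port; it is applied to / removed from a node with
+# calicoctl by calico_block_traffic_on_node() and
+# calico_unblock_traffic_on_node() below.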
+NETCHECKER_BLOCK_POLICY = {
+ "kind": "policy",
+ "spec": {
+ "ingress": [
+ {
+ "action": "allow"
+ },
+ {
+ "action": "deny",
+ "destination": {
+ "ports": [
+ NETCHECKER_SERVICE_PORT
+ ]
+ },
+ "protocol": "tcp"
+ }
+ ]
+ },
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "deny-netchecker"
+ }
+}
+
+
+def start_server(k8s, config, namespace=None,
+ deploy_spec=NETCHECKER_DEPLOYMENT_CFG,
+ svc_spec=NETCHECKER_SVC_CFG):
+ """Start netchecker server in k8s cluster
+
+ :param k8s: K8SManager
+ :param config: fixture provides oslo.config
+ :param namespace: str
+ :param deploy_spec: dict
+ :param svc_spec: dict
+ :return: None
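+
+    Example (as called from the Calico system tests below):
+        netchecker.start_server(k8s=k8s_deployed, config=config,
+                                namespace='netchecker')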
+ """
+ for container in deploy_spec['spec']['template']['spec']['containers']:
+ if container['name'] == 'netchecker-server':
+ container['image'] = \
+ config.k8s_deploy.kubernetes_netchecker_server_image
+ try:
+ if k8s.api.deployments.get(name=deploy_spec['metadata']['name'],
+ namespace=namespace):
+ LOG.debug('Network checker server deployment "{}" '
+ 'already exists! Skipping resource '
+ 'creation'.format(deploy_spec['metadata']['name']))
+ except rest.ApiException as e:
+ if e.status == 404:
+ n = k8s.check_deploy_create(body=deploy_spec, namespace=namespace)
+ k8s.wait_deploy_ready(n.name, namespace=namespace)
+ else:
+ raise e
+ try:
+        if k8s.api.services.get(name=svc_spec['metadata']['name'],
+                                namespace=namespace):
+ LOG.debug('Network checker server service {} is '
+ 'already running! Skipping resource creation'
+ '.'.format(svc_spec['metadata']['name']))
+ except rest.ApiException as e:
+ if e.status == 404:
+ k8s.check_service_create(body=svc_spec, namespace=namespace)
+ else:
+ raise e
+
+
+def start_agent(k8s, config, namespace=None, ds_spec=NETCHECKER_DS_CFG,
+ service_namespace=None):
+ """Start netchecker agent in k8s cluster
+
+ :param k8s: K8SManager
+ :param config: fixture provides oslo.config
+ :param namespace: str
+    :param ds_spec: list of dict, DaemonSet specs
+    :param service_namespace: str, namespace of the netchecker service
+ :return: None
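+
+    Example (as called from the Calico system tests below):
+        netchecker.start_agent(k8s=k8s_deployed, config=config,
+                               namespace='default',
+                               service_namespace='netchecker')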
+ """
+ for ds in ds_spec:
+ for container in ds['spec']['template']['spec']['containers']:
+ if container['name'] == 'netchecker-agent':
+ container['image'] = \
+ config.k8s_deploy.kubernetes_netchecker_agent_image
+ if service_namespace is not None:
+ container['args'].append(
+ "-serverendpoint={0}.{1}.svc.cluster.local:{2}".format(
+ NETCHECKER_SERVICE_NAME,
+ service_namespace,
+ NETCHECKER_SERVICE_PORT))
+ k8s.check_ds_create(body=ds, namespace=namespace)
+ k8s.wait_ds_ready(dsname=ds['metadata']['name'], namespace=namespace)
+ k8s.wait_pods_phase(pods=[pod for pod in k8s.api.pods.list()
+ if 'netchecker-agent' in pod.name],
+ phase='Running',
+ timeout=600)
+
+
+@utils.retry(3, requests.exceptions.RequestException)
+def get_status(k8sclient, netchecker_pod_port=NETCHECKER_NODE_PORT,
+ pod_name='netchecker-server', namespace='default'):
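+    """Request the connectivity check report from the netchecker server
+
+    Finds the node that hosts the netchecker-server pod and performs an
+    HTTP GET on http://<host_ip>:<node_port>/api/v1/connectivity_check.
+
+    :rtype: requests.Response
+    """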
+
+ netchecker_srv_pod_names = [pod.name for pod in
+ k8sclient.pods.list(namespace=namespace)
+ if pod_name in pod.name]
+
+ assert len(netchecker_srv_pod_names) > 0, \
+ "No netchecker-server pods found!"
+
+ netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
+ namespace=namespace)
+ kube_host_ip = netchecker_srv_pod.status.host_ip
+ net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
+ kube_host_ip, netchecker_pod_port)
+ response = requests.get(net_status_url, timeout=5)
+ LOG.debug('Connectivity check status: [{0}] {1}'.format(
+ response.status_code, response.text.strip()))
+ return response
+
+
+def check_network(k8sclient, namespace='default', works=True):
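+    """Assert the expected netchecker connectivity status: HTTP 200/204
+    from the server when connectivity works, HTTP 400 when it does not."""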
+ if works:
+ assert get_status(k8sclient,
+ namespace=namespace).status_code in (200, 204)
+ else:
+ assert get_status(k8sclient, namespace=namespace).status_code == 400
+
+
+def wait_check_network(k8sclient, namespace='default', works=True, timeout=120,
+ interval=5):
+ helpers.wait_pass(lambda: check_network(k8sclient, namespace=namespace,
+ works=works),
+ timeout=timeout, interval=interval)
+
+
+def calico_block_traffic_on_node(underlay, target_node):
+ cmd = "echo '{0}' | calicoctl create -f -".format(NETCHECKER_BLOCK_POLICY)
+ underlay.sudo_check_call(cmd, node_name=target_node)
+ LOG.info('Blocked traffic to the network checker service from '
+ 'containers on node "{}".'.format(target_node))
+
+
+def calico_unblock_traffic_on_node(underlay, target_node):
+ cmd = "echo '{0}' | calicoctl delete -f -".format(NETCHECKER_BLOCK_POLICY)
+
+ underlay.sudo_check_call(cmd, node_name=target_node)
+ LOG.info('Unblocked traffic to the network checker service from '
+ 'containers on node "{}".'.format(target_node))
+
+
+def calico_get_version(underlay, target_node):
+ raw_version = underlay.sudo_check_call('calicoctl version',
+ node_name=target_node)
+
+ assert raw_version['exit_code'] == 0 and len(raw_version['stdout']) > 0, \
+ "Unable to get calico version!"
+
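+    # 'calicoctl version' output differs between versions: either a
+    # multi-line report where the version is the second token of the first
+    # line, or a single line with the bare version string.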
+ if len(raw_version['stdout']) > 1:
+ ctl_version = raw_version['stdout'][0].split()[1].strip()
+ else:
+ ctl_version = raw_version['stdout'][0].strip()
+
+ LOG.debug("Calico (calicoctl) version on '{0}': '{1}'".format(target_node,
+ ctl_version))
+ return ctl_version
+
+
+def kubernetes_block_traffic_namespace(underlay, kube_host_ip, namespace):
+ # TODO(apanchenko): do annotation using kubernetes API
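+    # The 'DefaultDeny' ingress isolation annotation makes kubernetes drop
+    # all inbound traffic to pods in the namespace unless a network policy
+    # explicitly allows it.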
+ cmd = ('kubectl annotate ns {0} \'net.beta.kubernetes.io/'
+ 'network-policy={{"ingress": {{"isolation":'
+ ' "DefaultDeny"}}}}\'').format(namespace)
+ underlay.sudo_check_call(cmd=cmd, host=kube_host_ip)
+
+
+def calico_allow_netchecker_connections(underlay, kube_ssh_ip, kube_host_ip,
+ namespace):
+ calico_policy = {"kind": "policy",
+ "spec": {
+ "ingress": [
+ {
+ "action": "allow",
+ "source": {
+ "net": "{0}/24".format(kube_host_ip)
+ },
+ "destination": {
+ "selector": ("calico/k8s_ns =="
+ " \"{0}\"").format(namespace)
+ },
+ "protocol": "tcp"
+ }
+ ],
+ "order": 500,
+ "selector": "has(calico/k8s_ns)"
+ },
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "netchecker.allow-host-connections"}
+ }
+
+ cmd = "echo '{0}' | calicoctl apply -f -".format(
+ json.dumps(calico_policy))
+ underlay.sudo_check_call(cmd=cmd, host=kube_ssh_ip)
+
+
+def kubernetes_allow_traffic_from_agents(underlay, kube_host_ip, namespace):
+ # TODO(apanchenko): add network policies using kubernetes API
+ label_namespace_cmd = "kubectl label namespace default name=default"
+ underlay.sudo_check_call(cmd=label_namespace_cmd, host=kube_host_ip)
+ kubernetes_policy = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker",
+ "namespace": namespace,
+ },
+ "spec": {
+ "ingress": [
+ {
+ "from": [
+ {
+ "namespaceSelector": {
+ "matchLabels": {
+ "name": "default"
+ }
+ }
+ },
+ {
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-agent"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
+ }
+ }
+ }
+
+ kubernetes_policy_hostnet = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker-hostnet",
+ "namespace": namespace,
+ },
+ "spec": {
+ "ingress": [
+ {
+ "from": [
+ {
+ "namespaceSelector": {
+ "matchLabels": {
+ "name": "default"
+ }
+ }
+ },
+ {
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-agent-hostnet"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
+ }
+ }
+ }
+ cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
+ json.dumps(kubernetes_policy))
+ underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
+ cmd_add_policy_hostnet = "echo '{0}' | kubectl create -f -".format(
+ json.dumps(kubernetes_policy_hostnet))
+ underlay.sudo_check_call(cmd=cmd_add_policy_hostnet, host=kube_host_ip)
diff --git a/tcp_tests/managers/k8s/deployments.py b/tcp_tests/managers/k8s/deployments.py
index 1dbd1d2..5d47d70 100644
--- a/tcp_tests/managers/k8s/deployments.py
+++ b/tcp_tests/managers/k8s/deployments.py
@@ -26,6 +26,10 @@
def name(self):
return self.metadata.name
+ @property
+ def namespace(self):
+ return self.metadata.namespace
+
class K8sDeploymentManager(K8sBaseManager):
"""docstring for ClassName"""
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 45cb078..04222d9 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -229,6 +229,44 @@
lambda: self.check_ds_ready(dsname, namespace=namespace),
timeout=timeout, interval=interval)
+ def check_deploy_create(self, body, namespace=None):
+ """Check creating k8s Deployment
+
+ :param body: dict, Deployment spec
+ :param namespace: str
+ :rtype: K8sDeployment object
+ """
+ LOG.info("Creating Deployment in k8s cluster")
+ LOG.debug(
+ "Deployment spec to create:\n{}".format(
+ yaml.dump(body, default_flow_style=False))
+ )
+ deploy = self.api.deployments.create(body=body, namespace=namespace)
+ LOG.info("Deployment '{0}' is created in '{1}' namespace".format(
+ deploy.name, deploy.namespace))
+ return self.api.deployments.get(name=deploy.name,
+ namespace=deploy.namespace)
+
+ def check_deploy_ready(self, deploy_name, namespace=None):
+ """Check if k8s Deployment is ready
+
+ :param deploy_name: str, deploy name
+ :return: bool
+ """
+        deploy = self.api.deployments.get(name=deploy_name,
+                                          namespace=namespace)
+ return deploy.status.available_replicas == deploy.status.replicas
+
+    def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60,
+                          interval=5):
+ """Wait until all pods are scheduled on nodes
+
+ :param deploy_name: str, deploy name
+ :param timeout: int
+ :param interval: int
+ """
+ helpers.wait(
+ lambda: self.check_deploy_ready(deploy_name, namespace=namespace),
+ timeout=timeout, interval=interval)
+
def check_namespace_create(self, name):
"""Check creating k8s Namespace
@@ -290,3 +328,9 @@
self.__config.k8s.k8s_conformance_image),
timeout=timeout)['stdout']
return result
+
+ def get_k8s_masters(self):
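+        """Return the underlay host addresses of the k8s master nodes
+
+        Masters are discovered by the 'I@kubernetes:master' pillar target;
+        their FQDNs are resolved into hosts via the underlay SSH config.
+        """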
+ k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master',
+ pillar='linux:network:fqdn')
+        return [self.__underlay.host_by_node_name(node_name=v)
+ for pillar in k8s_masters_fqdn for k, v in pillar.items()]
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index b475357..d761d7f 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -200,6 +200,12 @@
default='mirantis/k8s-netchecker-agent:latest'),
ct.Cfg('kubernetes_netchecker_server_image', ct.String(),
default='mirantis/k8s-netchecker-server:latest'),
+ ct.Cfg('kubernetes_calico_policy_enabled', ct.Boolean(),
+ help="", default=False),
+ ct.Cfg('kubernetes_calico_policy_image', ct.String(),
+ default='calico/kube-policy-controller:v0.5.4'),
+ ct.Cfg('kubernetes_helm_enabled', ct.Boolean(),
+ help="", default=True),
]
k8s_opts = [
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
index 6071753..1c367e3 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
@@ -1,5 +1,16 @@
{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
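+# Write the k8s_deploy config options into the reclass cluster metadata so
+# that the salt formulas pick them up during the k8s deployment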
+- description: Set k8s deploy parameters
+ cmd: |
+ {% for k8s_opt, value in config.k8s_deploy.items() %}
+ {% if value|string() %}
+ salt-call reclass.cluster_meta_set {{ k8s_opt }} {{ value }};
+ {% endif %}
+ {% endfor %}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
- description: Install etcd
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@etcd:server' state.sls etcd.server.service
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
index 200b88b..f5c55df 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/salt.yaml
@@ -164,6 +164,14 @@
retry: {count: 1, delay: 1}
skip_fail: false
+# Patch linux formula to workaround https://mirantis.jira.com/browse/PROD-12327
+- description: Patch linux formula to workaround PROD-12327
+ cmd: salt '*' cmd.run 'wget https://raw.githubusercontent.com/saltstack/salt/2017.7/salt/templates/debian_ip/route_eth.jinja -O
+ /usr/lib/python2.7/dist-packages/salt/templates/debian_ip/route_eth.jinja'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Prepare salt services and nodes settings
- description: Run 'linux' formula on cfg01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -205,17 +213,6 @@
retry: {count: 5, delay: 5}
skip_fail: false
-- description: Set k8s deploy parameters
- cmd: |
- {% for k8s_opt, value in config.k8s_deploy.items() %}
- {% if value|string() %}
- salt-call reclass.cluster_meta_set {{ k8s_opt }} {{ value }};
- {% endif %}
- {% endfor %}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
- description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls reclass
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
new file mode 100644
index 0000000..f174c9f
--- /dev/null
+++ b/tcp_tests/tests/system/test_calico.py
@@ -0,0 +1,221 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from devops.helpers import helpers
+
+from tcp_tests import logger
+from tcp_tests.helpers import netchecker
+
+LOG = logger.logger
+
+
+class TestMCPCalico(object):
+ """Test class for Calico network provider in k8s"""
+
+ @pytest.mark.fail_snapshot
+ def test_k8s_netchecker_calico(self, show_step, config, k8s_deployed):
+ """Test for deploying k8s environment with Calico plugin and check
+ network connectivity between different pods by k8s-netchecker
+
+ Scenario:
+ 1. Install k8s with Calico network plugin.
+ 2. Run netchecker-server service.
+ 3. Run netchecker-agent daemon set.
+ 4. Get network verification status. Check status is 'OK'.
+
+ Duration: 3000 seconds
+ """
+
+ # STEP #1
+ show_step(1)
+ k8sclient = k8s_deployed.api
+        assert k8sclient.nodes.list() is not None, "Cannot get nodes list"
+
+ # STEP #2
+ show_step(2)
+ netchecker.start_server(k8s=k8s_deployed, config=config)
+ netchecker.wait_check_network(k8sclient, works=True,
+ timeout=300)
+
+ # STEP #3
+ show_step(3)
+ netchecker.start_agent(k8s=k8s_deployed, config=config)
+
+ # STEP #4
+ show_step(4)
+ netchecker.wait_check_network(k8sclient, works=True,
+ timeout=300)
+
+ @pytest.mark.fail_snapshot
+ def test_calico_route_recovery(self, show_step, config, underlay,
+ k8s_deployed):
+ """Test for deploying k8s environment with Calico plugin and check
+ that local routes are recovered by felix after removal
+
+ Scenario:
+ 1. Install k8s with Calico network plugin.
+ 2. Run netchecker-server service.
+ 3. Run netchecker-agent daemon set.
+ 4. Get network verification status. Check status is 'OK'.
+            5. Remove local route to netchecker-agent pod on the first node.
+            6. Check that the route is automatically recovered.
+ 7. Get network verification status. Check status is 'OK'.
+
+ Duration: 3000 seconds
+ """
+
+ # STEP #1
+ show_step(1)
+ k8sclient = k8s_deployed.api
+        assert k8sclient.nodes.list() is not None, "Cannot get nodes list"
+
+ # STEP #2
+ show_step(2)
+ netchecker.start_server(k8s=k8s_deployed, config=config)
+ LOG.info("Waiting for netchecker server is running")
+ netchecker.wait_check_network(k8sclient, works=True,
+ timeout=300)
+
+ # STEP #3
+ show_step(3)
+ netchecker.start_agent(k8s=k8s_deployed, config=config)
+
+ # STEP #4
+ show_step(4)
+ netchecker.wait_check_network(k8sclient, works=True,
+ timeout=300)
+
+ # STEP #5
+ show_step(5)
+ first_node = k8sclient.nodes.list()[0]
+ first_node_ips = [addr.address for addr in first_node.status.addresses
+ if 'IP' in addr.type]
+ assert len(first_node_ips) > 0, "Couldn't find first k8s node IP!"
+ first_node_names = [name for name in underlay.node_names()
+ if name.startswith(first_node.name)]
+ assert len(first_node_names) == 1, "Couldn't find first k8s node " \
+ "hostname in SSH config!"
+ first_node_name = first_node_names.pop()
+
+ target_pod_ip = None
+
+ for pod in k8sclient.pods.list():
+ if pod.status.host_ip not in first_node_ips:
+ continue
+ # TODO: get pods by daemonset with name 'netchecker-agent'
+ if 'netchecker-agent-' in pod.name and 'hostnet' not in pod.name:
+ target_pod_ip = pod.status.pod_ip
+
+ assert target_pod_ip is not None, "Could not find netchecker pod IP!"
+
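+        # Calico's felix agent is expected to notice the missing route and
+        # re-program it on the node shortly afterwards.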
+ route_del_cmd = 'ip route delete {0}'.format(target_pod_ip)
+ underlay.sudo_check_call(cmd=route_del_cmd, node_name=first_node_name)
+ LOG.debug('Removed local route to pod IP {0} on node {1}'.format(
+ target_pod_ip, first_node.name
+ ))
+
+ # STEP #6
+ show_step(6)
+ route_chk_cmd = 'ip route list | grep -q "{0}"'.format(target_pod_ip)
+ helpers.wait_pass(
+ lambda: underlay.sudo_check_call(cmd=route_chk_cmd,
+ node_name=first_node_name),
+ timeout=30,
+ interval=1
+ )
+ pod_ping_cmd = 'sleep 3 && ping -q -c 1 -w 3 {0}'.format(target_pod_ip)
+ underlay.sudo_check_call(cmd=pod_ping_cmd, node_name=first_node_name)
+ LOG.debug('Local route to pod IP {0} on node {1} is '
+ 'recovered'.format(target_pod_ip, first_node.name))
+
+ # STEP #7
+ show_step(7)
+ netchecker.wait_check_network(k8sclient, works=True)
+
+ @pytest.mark.fail_snapshot
+ def test_calico_network_policies(self, show_step, config, underlay,
+ k8s_deployed):
+ """Test for deploying k8s environment with Calico and check
+ that network policies work as expected
+
+ Scenario:
+ 1. Install k8s.
+            2. Create new namespace 'netchecker'.
+            3. Run netchecker-server service.
+            4. Check that netchecker-server returns '200 OK'.
+            5. Run netchecker-agent daemon set in the default namespace.
+            6. Get network verification status. Check status is 'OK'.
+            7. Enable network isolation for the 'netchecker' namespace.
+            8. Allow connections to netchecker-server from the tests using
+               a Calico policy.
+            9. Get network verification status. Check status is 'FAIL'
+               because no netchecker-agent pods can reach the
+               netchecker-server pod.
+            10. Add kubernetes network policies which allow connections
+                from netchecker-agent pods (including ones with host
+                network).
+            11. Get network verification status. Check status is 'OK'.
+
+ Duration: 3000 seconds
+ """
+
+ show_step(1)
+ k8sclient = k8s_deployed.api
+        assert k8sclient.nodes.list() is not None, "Cannot get nodes list"
+ kube_master_nodes = k8s_deployed.get_k8s_masters()
+ assert kube_master_nodes, "No k8s masters found in pillars!"
+
+ show_step(2)
+ k8s_deployed.check_namespace_create(name='netchecker')
+
+ show_step(3)
+ netchecker.start_server(k8s=k8s_deployed, config=config,
+ namespace='netchecker')
+
+ show_step(4)
+ netchecker.wait_check_network(k8sclient, namespace='netchecker',
+ works=True)
+
+ show_step(5)
+ netchecker.start_agent(k8s=k8s_deployed, config=config,
+ namespace='default',
+ service_namespace='netchecker')
+
+ show_step(6)
+ netchecker.wait_check_network(k8sclient, namespace='netchecker',
+ works=True, timeout=300)
+
+ show_step(7)
+ netchecker.kubernetes_block_traffic_namespace(underlay,
+ kube_master_nodes[0],
+ 'netchecker')
+
+ show_step(8)
+ netchecker.calico_allow_netchecker_connections(underlay,
+ kube_master_nodes[0],
+ config.k8s.kube_host,
+ 'netchecker')
+
+ show_step(9)
+ netchecker.wait_check_network(k8sclient, namespace='netchecker',
+ works=False, timeout=500)
+
+ show_step(10)
+ netchecker.kubernetes_allow_traffic_from_agents(underlay,
+ kube_master_nodes[0],
+ 'netchecker')
+
+ show_step(11)
+ netchecker.wait_check_network(k8sclient, namespace='netchecker',
+ works=True, timeout=300)