Update calico tests
Make the Calico tests operational again.
All policies are now set up using Kubernetes primitives instead of calicoctl.
The policy test is included in the CI run.
Netchecker must be deployed before the tests are run, because it is hard to
keep the netchecker configuration used by the tests in sync with its
configuration in MCP.
Common requirements for the Calico tests:
KUBERNETES_NETCHECKER_ENABLED=true
Additional requirement for the policy test:
KUBERNETES_CALICO_POLICY_ENABLED=true
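For illustration, a local run with these settings might look as follows (the
pytest invocation below is an assumption about how the suite is launched;
netchecker itself is expected to be deployed by MCP beforehand):
  export KUBERNETES_NETCHECKER_ENABLED=true
  export KUBERNETES_CALICO_POLICY_ENABLED=true  # policy test only
  py.test -k TestMCPCalico tcp_tests/tests/system/test_calico.py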
PROD-19834
Change-Id: I5100126dfdf03fd77a82e8dc412c8bf716eb1bd1
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
index b233023..24ab271 100644
--- a/tcp_tests/helpers/netchecker.py
+++ b/tcp_tests/helpers/netchecker.py
@@ -426,40 +426,19 @@
underlay.sudo_check_call(cmd=cmd, host=kube_host_ip)
-def calico_allow_netchecker_connections(underlay, kube_ssh_ip, kube_host_ip,
+def calico_allow_netchecker_connections(underlay, k8sclient, kube_host_ip,
namespace):
- calico_policy = {"kind": "policy",
- "spec": {
- "ingress": [
- {
- "action": "allow",
- "source": {
- "net": "{0}/24".format(kube_host_ip)
- },
- "destination": {
- "selector": ("calico/k8s_ns =="
- " \"{0}\"").format(namespace)
- },
- "protocol": "tcp"
- }
- ],
- "order": 500,
- "selector": "has(calico/k8s_ns)"
- },
- "apiVersion": "v1",
- "metadata": {
- "name": "netchecker.allow-host-connections"}
- }
+ netchecker_srv_pod_names = [pod.name for pod in
+ k8sclient.pods.list(namespace=namespace)
+ if 'netchecker-server' in pod.name]
- cmd = "echo '{0}' | calicoctl apply -f -".format(
- json.dumps(calico_policy))
- underlay.sudo_check_call(cmd=cmd, host=kube_ssh_ip)
+ assert len(netchecker_srv_pod_names) > 0, \
+ "No netchecker-server pods found!"
+ netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
+ namespace=namespace)
+ nc_host_ip = netchecker_srv_pod.status.host_ip
-def kubernetes_allow_traffic_from_agents(underlay, kube_host_ip, namespace):
- # TODO(apanchenko): add network policies using kubernetes API
- label_namespace_cmd = "kubectl label namespace default name=default"
- underlay.sudo_check_call(cmd=label_namespace_cmd, host=kube_host_ip)
kubernetes_policy = {
"apiVersion": "extensions/v1beta1",
"kind": "NetworkPolicy",
@@ -472,9 +451,45 @@
{
"from": [
{
+ "ipBlock": {
+ "cidr": nc_host_ip + "/24"
+ }
+ }
+ ]
+ }
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
+ }
+ }
+ }
+
+ cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
+ json.dumps(kubernetes_policy))
+ underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
+
+
+def kubernetes_allow_traffic_from_agents(underlay, kube_host_ip, namespace):
+ # TODO(apanchenko): add network policies using kubernetes API
+ label_namespace_cmd = "kubectl label namespace default name=default"
+ underlay.sudo_check_call(cmd=label_namespace_cmd, host=kube_host_ip)
+ kubernetes_policy = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker-agent",
+ "namespace": namespace,
+ },
+ "spec": {
+ "ingress": [
+ {
+ "from": [
+ {
"namespaceSelector": {
"matchLabels": {
- "name": "default"
+ "name": namespace
}
}
},
@@ -500,7 +515,7 @@
"apiVersion": "extensions/v1beta1",
"kind": "NetworkPolicy",
"metadata": {
- "name": "access-netchecker-hostnet",
+ "name": "access-netchecker-agent-hostnet",
"namespace": namespace,
},
"spec": {
@@ -510,7 +525,7 @@
{
"namespaceSelector": {
"matchLabels": {
- "name": "default"
+ "name": namespace
}
}
},
@@ -531,9 +546,11 @@
}
}
}
+
cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
json.dumps(kubernetes_policy))
underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
+
cmd_add_policy_hostnet = "echo '{0}' | kubectl create -f -".format(
json.dumps(kubernetes_policy_hostnet))
underlay.sudo_check_call(cmd=cmd_add_policy_hostnet, host=kube_host_ip)
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index cc44ba4..0dc7265 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -23,7 +23,10 @@
class TestMCPCalico(object):
- """Test class for Calico network provider in k8s"""
+ """Test class for Calico network provider in k8s.
+    Common requirements for the Calico tests:
+ KUBERNETES_NETCHECKER_ENABLED=true
+ """
@pytest.mark.fail_snapshot
def test_k8s_netchecker_calico(self, show_step, config, k8s_deployed):
@@ -31,33 +34,20 @@
network connectivity between different pods by k8s-netchecker
Scenario:
- 1. Install k8s with Calico network plugin.
- 2. Run netchecker-server service.
- 3. Run netchecker-agent daemon set.
- 4. Get network verification status. Check status is 'OK'.
+ 1. Check k8s installation.
+            2. Get network verification status. Expected status is 'OK'.
Duration: 3000 seconds
"""
- # STEP #1
show_step(1)
k8sclient = k8s_deployed.api
assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ netchecker_port = netchecker.get_service_port(k8sclient)
- # STEP #2
show_step(2)
- netchecker.start_server(k8s=k8s_deployed, config=config)
- netchecker.wait_check_network(k8sclient, works=True,
- timeout=300)
-
- # STEP #3
- show_step(3)
- netchecker.start_agent(k8s=k8s_deployed, config=config)
-
- # STEP #4
- show_step(4)
- netchecker.wait_check_network(k8sclient, works=True,
- timeout=300)
+ netchecker.wait_check_network(k8sclient, works=True, timeout=300,
+ netchecker_pod_port=netchecker_port)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -68,14 +58,14 @@
that local routes are recovered by felix after removal
Scenario:
- 1. Install k8s with Calico network plugin.
+ 1. Check k8s installation.
2. Check netchecker-server service.
3. Check netchecker-agent daemon set.
- 4. Get network verification status. Check status is 'OK'.
- 5. Get metrics from netchecker
- 6. Remove local route to netchecker-agent pod on the first node
- 7. Check that the route is automatically recovered
- 8. Get network verification status. Check status is 'OK'.
+            4. Get network verification status. Expected status is 'OK'.
+ 5. Get metrics from netchecker.
+ 6. Remove local route to netchecker-agent pod on the first node.
+ 7. Check that the route is automatically recovered.
+            8. Get network verification status. Expected status is 'OK'.
Duration: 3000 seconds
"""
@@ -84,6 +74,7 @@
k8sclient = k8s_deployed.api
assert k8sclient.nodes.list() is not None, "Can not get nodes list"
netchecker_port = netchecker.get_service_port(k8sclient)
+
show_step(2)
netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
namespace='netchecker')
@@ -96,6 +87,7 @@
show_step(4)
netchecker.wait_check_network(k8sclient, namespace='netchecker',
netchecker_pod_port=netchecker_port)
+
show_step(5)
res = netchecker.get_metric(k8sclient,
netchecker_pod_port=netchecker_port,
@@ -111,13 +103,12 @@
'ncagent_http_probe_server_processing_time_ms',
'ncagent_http_probe_tcp_connection_time_ms',
'ncagent_http_probe_total_time_ms',
- 'ncagent_report_count_tota']
+ 'ncagent_report_count_total']
for metric in metrics:
assert metric in res.text.strip(), \
- 'Mandotory metric {0} is missing in {1}'.format(
+ 'Mandatory metric {0} is missing in {1}'.format(
metric, res.text)
- # STEP #6
show_step(6)
first_node = k8sclient.nodes.list()[0]
first_node_ips = [addr.address for addr in first_node.status.addresses
@@ -132,6 +123,7 @@
target_pod_ip = None
for pod in k8sclient.pods.list(namespace='netchecker'):
+ LOG.debug('NC pod IP: {0}'.format(pod.status.pod_ip))
if pod.status.host_ip not in first_node_ips:
continue
# TODO: get pods by daemonset with name 'netchecker-agent'
@@ -146,7 +138,6 @@
target_pod_ip, first_node.name
))
- # STEP #7
show_step(7)
route_chk_cmd = 'ip route list | grep -q "{0}"'.format(target_pod_ip)
helpers.wait_pass(
@@ -161,36 +152,31 @@
LOG.debug('Local route to pod IP {0} on node {1} is '
'recovered'.format(target_pod_ip, first_node.name))
- # STEP #8
show_step(8)
netchecker.wait_check_network(k8sclient, namespace='netchecker',
netchecker_pod_port=netchecker_port,
works=True)
@pytest.mark.fail_snapshot
- # FIXME(apanchenko): uncomment as soon as the following bug is fixed
- # FIXME(apanchenko): https://mirantis.jira.com/browse/PROD-12532
- # @pytest.mark.calico_ci
+ @pytest.mark.calico_ci
def test_calico_network_policies(self, show_step, config, underlay,
k8s_deployed):
"""Test for deploying k8s environment with Calico and check
- that network policies work as expected
+ that network policies work as expected.
+           Additional requirement for the policy test:
+ KUBERNETES_CALICO_POLICY_ENABLED=true
Scenario:
- 1. Install k8s.
- 2. Create new namespace 'netchecker'
- 3. Run netchecker-server service
- 4. Check that netchecker-server returns '200 OK'
- 5. Run netchecker-agent daemon set in default namespace
- 6. Get network verification status. Check status is 'OK'
- 7. Enable network isolation for 'netchecker' namespace
- 8. Allow connections to netchecker-server from tests using
- Calico policy
- 9. Get network verification status. Check status is 'FAIL' because
- no netcheker-agent pods can reach netchecker-service pod
- 10. Add kubernetes network policies which allow connections
- from netchecker-agent pods (including ones with host network)
- 11. Get network verification status. Check status is 'OK'
+ 1. Check k8s installation.
+            2. Get network verification status. Expected status is 'OK'.
+ 3. Enable network isolation for 'netchecker' namespace.
+ 4. Allow connections to netchecker-server from tests.
+            5. Get network verification status. Expected status is 'FAIL'
+               because no netchecker-agent pods should be able to reach
+ netchecker-service pod.
+ 6. Add kubernetes network policies which allow connections
+ from netchecker-agent pods (including ones with host network).
+            7. Get network verification status. Expected status is 'OK'.
Duration: 3000 seconds
"""
@@ -200,47 +186,34 @@
assert k8sclient.nodes.list() is not None, "Can not get nodes list"
kube_master_nodes = k8s_deployed.get_k8s_masters()
assert kube_master_nodes, "No k8s masters found in pillars!"
+ netchecker_port = netchecker.get_service_port(k8sclient)
show_step(2)
- k8s_deployed.check_namespace_create(name='netchecker')
+ netchecker.wait_check_network(k8sclient, namespace='netchecker',
+ works=True, timeout=300,
+ netchecker_pod_port=netchecker_port)
show_step(3)
- netchecker.start_server(k8s=k8s_deployed, config=config,
- namespace='netchecker')
-
- show_step(4)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True)
-
- show_step(5)
- netchecker.start_agent(k8s=k8s_deployed, config=config,
- namespace='default',
- service_namespace='netchecker')
-
- show_step(6)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300)
-
- show_step(7)
netchecker.kubernetes_block_traffic_namespace(underlay,
kube_master_nodes[0],
'netchecker')
- show_step(8)
- netchecker.calico_allow_netchecker_connections(underlay,
+ show_step(4)
+ netchecker.calico_allow_netchecker_connections(underlay, k8sclient,
kube_master_nodes[0],
- config.k8s.kube_host,
'netchecker')
- show_step(9)
+ show_step(5)
netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=False, timeout=500)
+ works=False, timeout=500,
+ netchecker_pod_port=netchecker_port)
- show_step(10)
+ show_step(6)
netchecker.kubernetes_allow_traffic_from_agents(underlay,
kube_master_nodes[0],
'netchecker')
- show_step(11)
+ show_step(7)
netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300)
+ works=True, timeout=300,
+ netchecker_pod_port=netchecker_port)