blob: c0f1b9487d7f7e93e0e9edf91a42970aa6c79180 [file] [log] [blame]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +04001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
15import pytest
Vladimir Jigulin34dfa942018-07-23 21:05:48 +040016import netaddr
17import os
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040018
19from tcp_tests import logger
20from tcp_tests import settings
21
22LOG = logger.logger
23
24
class TestMCPK8sActions(object):
    """Test class for different k8s actions"""

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.cz8116
    @pytest.mark.k8s_calico
    def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
        """Test externaldns integration with coredns

        Scenario:
            1. Install k8s with externaldns addon enabled(including etcd,
               coredns)
            2. Start simple service
            3. Expose deployment
            4. Annotate service with domain name
            5. Try to get service using nslookup
        """

        # Both addons are required for this scenario; skip (not fail) when
        # the environment was deployed without them.
        if not (config.k8s_deploy.kubernetes_externaldns_enabled and
                config.k8s_deploy.kubernetes_coredns_enabled):
            pytest.skip("Test requires Externaldns and coredns addons enabled")

        show_step(1)
        k8sclient = k8s_deployed.api
        assert k8sclient.nodes.list() is not None, "Can not get nodes list"

        show_step(2)
        name = 'test-nginx'
        k8s_deployed.kubectl_run(name, 'nginx', '80')

        show_step(3)
        k8s_deployed.kubectl_expose('deployment', name, '80', 'ClusterIP')

        # Domain name that externaldns should publish for the test service.
        hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
        annotation = "\"external-dns.alpha.kubernetes.io/" \
                     "hostname={0}\"".format(hostname)
        show_step(4)
        k8s_deployed.kubectl_annotate('service', name, annotation)

        show_step(5)
        # Resolve the annotated name directly against the coredns service IP.
        dns_host = k8s_deployed.get_svc_ip('coredns')
        k8s_deployed.nslookup(hostname, dns_host)

    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance
        """

        show_step(1)
        k8s_deployed.start_k8s_cncf_verification()

    # FIX: marker was misspelled "grap_versions", so this test was never
    # selected by the "grab_versions" mark used everywhere else in the file.
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
                              k8s_chain_update_log_helper):
        """Test for chain-upgrading k8s hypercube pool and checking it

        Scenario:
            1. Prepare salt on hosts
            2. Setup controller nodes
            3. Setup compute nodes
            4. Setup Kubernetes cluster
            5. Run and expose sample test service
            6. Run conformance to check consistency
            7. For every version in update chain:
               Update cluster to new version, check test sample service
               availability, run conformance
        """
        # NOTE(review): steps 1-4 are presumably performed by the
        # underlay/k8s_deployed fixtures (no show_step(1..4) here) --
        # confirm against the fixture implementations.

        show_step(5)
        sample = k8s_deployed.get_sample_deployment('test-dep-chain-upgrade')
        sample.run()
        sample.expose()
        sample.wait_for_ready()

        # Baseline: service must be reachable before any update is applied.
        assert sample.is_service_available()

        show_step(6)
        k8s_deployed.run_conformance(log_out="k8s_conformance.log")

        show_step(7)
        chain_versions = config.k8s.k8s_update_chain.split(" ")
        for version in chain_versions:
            LOG.info("Chain update to '{}' version".format(version))
            k8s_deployed.update_k8s_images(version)

            LOG.info("Checking test service availability")
            assert sample.is_service_available()

            LOG.info("Running conformance on {} version".format(version))
            log_name = "k8s_conformance_{}.log".format(version)
            # Do not abort the rest of the update chain when conformance
            # fails on an intermediate version; the log is still collected.
            k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)

    # FIX: marker was misspelled "grap_versions" (see note above the
    # chain-update test).
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_metallb(self, show_step, config, k8s_deployed):
        """Enable metallb in cluster and do basic tests

        Scenario:
            1. Setup Kubernetes cluster with enabled metallb
            2. Check that metallb pods created in metallb-system namespace
            3. Run 5 sample deployments
            4. Expose deployments with type=LoadBalancer
            5. Check services availability from outside of cluster
            6. Run conformance
            7. Check services availability from outside of cluster
        """
        show_step(1)
        if not config.k8s_deploy.kubernetes_metallb_enabled:
            pytest.skip("Test requires metallb addon enabled")

        show_step(2)
        # metallb deploys a "controller" pod and per-node "speaker" pods.
        ns = "metallb-system"
        assert k8s_deployed.is_pod_exists_with_prefix("controller", ns)
        assert k8s_deployed.is_pod_exists_with_prefix("speaker", ns)

        show_step(3)
        samples = []
        for i in range(5):
            name = 'test-dep-metallb-{}'.format(i)
            sample = k8s_deployed.get_sample_deployment(name)
            sample.run()
            samples.append(sample)

        show_step(4)
        # Expose all deployments first, then wait, so the services come up
        # in parallel instead of serially.
        for sample in samples:
            sample.expose('LoadBalancer')
        for sample in samples:
            sample.wait_for_ready()

        show_step(5)
        for sample in samples:
            assert sample.is_service_available(external=True)

        show_step(6)
        k8s_deployed.run_conformance()

        show_step(7)
        # Re-check that conformance did not break the LoadBalancer services.
        for sample in samples:
            assert sample.is_service_available(external=True)

    # FIX: marker was misspelled "grap_versions" (see note above the
    # chain-update test).
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_genie_flannel(self, show_step, underlay, salt_deployed,
                               k8s_deployed, k8s_copy_sample_testdata):
        """Test genie-cni+flannel cni setup

        Scenario:
            1. Setup Kubernetes cluster with genie cni and flannel
            2. Check that flannel pods created in kube-system namespace
            3. Create sample deployment with flannel cni annotation
            4. Check that the deployment have 1 ip addresses from cni provider
            5. Create sample deployment with calico cni annotation
            6. Check that the deployment have 1 ip addresses from cni provider
            7. Create sample deployment with multi-cni annotation
            8. Check that the deployment have 2 ip addresses from different
               cni providers
            9. Create sample deployment without cni annotation
            10. Check that the deployment have 1 ip address
            11. Check pods availability
            12. Run conformance
            13. Check pods availability
        """
        show_step(1)

        # Find out calico and flannel networks from the cluster pillar data.
        tgt_k8s_control = "I@kubernetes:control:enabled:True"

        flannel_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control,
            pillar="kubernetes:master:network:flannel:private_ip_range")[0]
        # FIX: wrap dict.values() in list() -- on python3 dict views are not
        # subscriptable; list(...)[0] behaves identically on python2.
        flannel_network = netaddr.IPNetwork(list(flannel_pillar.values())[0])
        LOG.info("Flannel network: {}".format(flannel_network))

        calico_network_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
        calico_netmask_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
        calico_network = netaddr.IPNetwork(
            "{0}/{1}".format(list(calico_network_pillar.values())[0],
                             list(calico_netmask_pillar.values())[0]))
        LOG.info("Calico network: {}".format(calico_network))

        show_step(2)
        assert k8s_deployed.is_pod_exists_with_prefix("kube-flannel-",
                                                      "kube-system")

        # Sample pod manifests live next to this test module.
        data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
        show_step(3)
        flannel_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample-flannel.yaml'))

        show_step(4)
        flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
        assert len(flannel_ips) == 1
        assert netaddr.IPAddress(flannel_ips[0]) in flannel_network

        show_step(5)
        calico_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample-calico.yaml'))

        show_step(6)
        calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
        assert len(calico_ips) == 1
        assert netaddr.IPAddress(calico_ips[0]) in calico_network

        show_step(7)
        multicni_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample-multicni.yaml'))

        show_step(8)
        multicni_ips = \
            k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
        assert len(multicni_ips) == 2
        # Each provider network must own at least one of the two addresses.
        for net in [calico_network, flannel_network]:
            assert netaddr.IPAddress(multicni_ips[0]) in net or \
                netaddr.IPAddress(multicni_ips[1]) in net

        show_step(9)
        nocni_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample.yaml'))

        show_step(10)
        # Without an annotation the pod gets exactly one address from either
        # provider (whichever genie selects as default).
        nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
        assert len(nocni_ips) == 1
        assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
                netaddr.IPAddress(nocni_ips[0]) in flannel_network)

        show_step(11)

        def check_pod_availability(ip):
            # Sample pods serve a fixed greeting on port 8080.
            assert "Hello Kubernetes!" in k8s_deployed.curl(
                "http://{}:8080".format(ip))

        def check_pods_availability():
            check_pod_availability(flannel_ips[0])
            check_pod_availability(calico_ips[0])
            check_pod_availability(multicni_ips[0])
            check_pod_availability(multicni_ips[1])
            check_pod_availability(nocni_ips[0])

        check_pods_availability()

        show_step(12)
        k8s_deployed.run_conformance()

        show_step(13)
        # Re-check all pods after conformance to catch CNI regressions.
        check_pods_availability()