blob: be234d1a4cbd1b320bb01c74b162dfe41195c88f [file] [log] [blame]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +04001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
15import pytest
Vladimir Jigulin34dfa942018-07-23 21:05:48 +040016import netaddr
17import os
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040018import json
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +040019import requests
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040020
21from tcp_tests import logger
22from tcp_tests import settings
23
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040024from tcp_tests.managers.k8s import read_yaml_file
25
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040026LOG = logger.logger
27
28
29class TestMCPK8sActions(object):
30 """Test class for different k8s actions"""
31
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040032 def __read_testdata_yaml(self, name):
33 dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
34 return read_yaml_file(dir, name)
35
Tatyana Leontovichc411ec32017-10-09 14:48:00 +030036 @pytest.mark.grab_versions
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040037 @pytest.mark.fail_snapshot
Tatyana Leontovich071ce6a2017-10-24 18:08:10 +030038 @pytest.mark.cz8116
Dennis Dmitriev0f624a82018-06-11 12:57:13 +030039 @pytest.mark.k8s_calico
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040040 def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
41 """Test externaldns integration with coredns
42
43 Scenario:
44 1. Install k8s with externaldns addon enabled(including etcd, coredns)
45 2. Start simple service
46 3. Expose deployment
47 4. Annotate service with domain name
48 5. Try to get service using nslookup
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040049 6. Delete service and deployment
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040050 """
51
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040052 show_step(1)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040053 if not (config.k8s_deploy.kubernetes_externaldns_enabled and
54 config.k8s_deploy.kubernetes_coredns_enabled):
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040055 pytest.skip("Test requires externaldns and coredns addons enabled")
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040056
57 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040058 deployment = k8s_deployed.run_sample_deployment('test-dep')
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040059
60 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040061 svc = deployment.expose()
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040062
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040063 show_step(4)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040064 hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
65 svc.patch({
66 "metadata": {
67 "annotations": {
68 "external-dns.alpha.kubernetes.io/hostname": hostname
69 }
70 }
71 })
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040072
73 show_step(5)
Vladimir Jigulin90689152018-09-26 15:38:19 +040074 dns_svc = k8s_deployed.api.services.get(
75 name='coredns', namespace='kube-system')
76 k8s_deployed.nslookup(hostname, dns_svc.get_ip())
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040077
78 show_step(6)
79 deployment.delete()
Victor Ryzhenkin87a31422018-03-16 22:25:27 +040080
    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    k8s_cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance

        The cncf_publisher mark lists the artifact files expected from the
        run; the k8s_cncf_log_helper fixture is requested only for its side
        effect (presumably collecting those artifacts — confirm in fixtures).
        """

        show_step(1)
        # Long-running CNCF conformance verification on the deployed cluster.
        k8s_deployed.start_k8s_cncf_verification()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040096
97 @pytest.mark.grap_versions
98 @pytest.mark.fail_snapshot
99 def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
100 k8s_chain_update_log_helper):
101 """Test for chain-upgrading k8s hypercube pool and checking it
102
103 Scenario:
104 1. Prepare salt on hosts
105 2. Setup controller nodes
106 3. Setup compute nodes
107 4. Setup Kubernetes cluster
108 5. Run and expose sample test service
109 6. Run conformance to check consistency
110 7. For every version in update chain:
111 Update cluster to new version, check test sample service
112 availability, run conformance
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400113 8. Delete service and deployment
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400114 """
115
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400116 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400117 sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400118 sample.expose()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400119 sample.wait_ready()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400120
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400121 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400122
123 show_step(6)
124 k8s_deployed.run_conformance(log_out="k8s_conformance.log")
125
126 show_step(7)
127 chain_versions = config.k8s.k8s_update_chain.split(" ")
128 for version in chain_versions:
129 LOG.info("Chain update to '{}' version".format(version))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400130 k8s_deployed.update_k8s_version(version)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400131
132 LOG.info("Checking test service availability")
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400133 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400134
135 LOG.info("Running conformance on {} version".format(version))
136 log_name = "k8s_conformance_{}.log".format(version)
137 k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400138
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400139 assert sample.is_service_available()
140
141 show_step(8)
142 sample.delete()
143
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400144 @pytest.mark.grap_versions
145 @pytest.mark.fail_snapshot
146 def test_k8s_metallb(self, show_step, config, k8s_deployed):
147 """Enable metallb in cluster and do basic tests
148
149 Scenario:
150 1. Setup Kubernetes cluster with enabled metallb
151 2. Check that metallb pods created in metallb-system namespace
152 3. Run 5 sample deployments
153 4. Expose deployments with type=LoadBalancer
154 5. Check services availability from outside of cluster
155 6. Run conformance
156 7. Check services availability from outside of cluster
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400157 8. Delete deployments
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400158 """
159 show_step(1)
160 if not config.k8s_deploy.kubernetes_metallb_enabled:
161 pytest.skip("Test requires metallb addon enabled")
162
163 show_step(2)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400164 ns = "metallb-system"
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400165 assert \
166 len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
167 assert \
168 len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400169
170 show_step(3)
171 samples = []
172 for i in range(5):
173 name = 'test-dep-metallb-{}'.format(i)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400174 samples.append(k8s_deployed.run_sample_deployment(name))
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400175
176 show_step(4)
177 for sample in samples:
178 sample.expose('LoadBalancer')
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400179 sample.wait_ready()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400180
181 show_step(5)
182 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400183 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400184 assert sample.is_service_available(external=True)
185
186 show_step(6)
187 k8s_deployed.run_conformance()
188
189 show_step(7)
190 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400191 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400192 assert sample.is_service_available(external=True)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400193
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400194 show_step(8)
195 for sample in samples:
196 sample.delete()
197
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400198 @pytest.mark.grap_versions
199 @pytest.mark.fail_snapshot
Dennis Dmitrievee5ef232018-08-31 13:53:18 +0300200 @pytest.mark.k8s_genie
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400201 def test_k8s_genie_flannel(self, show_step, config,
202 salt_deployed, k8s_deployed):
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400203 """Test genie-cni+flannel cni setup
204
205 Scenario:
206 1. Setup Kubernetes cluster with genie cni and flannel
207 2. Check that flannel pods created in kube-system namespace
208 3. Create sample deployment with flannel cni annotation
209 4. Check that the deployment have 1 ip addresses from cni provider
210 5. Create sample deployment with calico cni annotation
211 6. Check that the deployment have 1 ip addresses from cni provider
212 7. Create sample deployment with multi-cni annotation
213 8. Check that the deployment have 2 ip addresses from different
214 cni providers
215 9. Create sample deployment without cni annotation
216 10. Check that the deployment have 1 ip address
217 11. Check pods availability
218 12. Run conformance
219 13. Check pods availability
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400220 14. Delete pods
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400221 """
222 show_step(1)
223
224 # Find out calico and flannel networks
Victor Ryzhenkin0c373822018-10-30 17:55:50 +0400225 tgt_k8s_control = "I@kubernetes:master"
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400226
227 flannel_pillar = salt_deployed.get_pillar(
228 tgt=tgt_k8s_control,
229 pillar="kubernetes:master:network:flannel:private_ip_range")[0]
230 flannel_network = netaddr.IPNetwork(flannel_pillar.values()[0])
231 LOG.info("Flannel network: {}".format(flannel_network))
232
233 calico_network_pillar = salt_deployed.get_pillar(
234 tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
235 calico_netmask_pillar = salt_deployed.get_pillar(
236 tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
237 calico_network = netaddr.IPNetwork(
238 "{0}/{1}".format(calico_network_pillar.values()[0],
239 calico_netmask_pillar.values()[0]))
240 LOG.info("Calico network: {}".format(calico_network))
241
242 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400243 assert k8s_deployed.api.pods.list(
244 namespace="kube-system", name_prefix="kube-flannel-") > 0
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400245
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400246 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400247 flannel_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400248 body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400249 flannel_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400250
251 show_step(4)
252 flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
253 assert len(flannel_ips) == 1
254 assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
255
256 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400257 calico_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400258 body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400259 calico_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400260
261 show_step(6)
262 calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
263 assert len(calico_ips) == 1
264 assert netaddr.IPAddress(calico_ips[0]) in calico_network
265
266 show_step(7)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400267 multicni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400268 body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400269 multicni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400270
271 show_step(8)
272 multicni_ips = \
273 k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
274 assert len(multicni_ips) == 2
275 for net in [calico_network, flannel_network]:
276 assert netaddr.IPAddress(multicni_ips[0]) in net or \
277 netaddr.IPAddress(multicni_ips[1]) in net
278
279 show_step(9)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400280 nocni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400281 body=self.__read_testdata_yaml('pod-sample.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400282 nocni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400283
284 show_step(10)
285 nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
286 assert len(nocni_ips) == 1
287 assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
288 netaddr.IPAddress(nocni_ips[0]) in flannel_network)
289
290 show_step(11)
291
292 def check_pod_availability(ip):
293 assert "Hello Kubernetes!" in k8s_deployed.curl(
294 "http://{}:8080".format(ip))
295
296 def check_pods_availability():
297 check_pod_availability(flannel_ips[0])
298 check_pod_availability(calico_ips[0])
299 check_pod_availability(multicni_ips[0])
300 check_pod_availability(multicni_ips[1])
301 check_pod_availability(nocni_ips[0])
302
303 check_pods_availability()
304
305 show_step(12)
306 k8s_deployed.run_conformance()
307
308 show_step(13)
309 check_pods_availability()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400310
311 show_step(14)
312 flannel_pod.delete()
313 calico_pod.delete()
314 multicni_pod.delete()
315 nocni_pod.delete()
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400316
317 @pytest.mark.grap_versions
318 @pytest.mark.fail_snapshot
319 def test_k8s_dashboard(self, show_step, config,
320 salt_deployed, k8s_deployed):
321 """Test dashboard setup
322
323 Scenario:
324 1. Setup Kubernetes cluster
325 2. Try to curl login status api
326 3. Create a test-admin-user account
327 4. Try to login in dashboard using test-admin-user account
328 5. Get and check list of namespaces using dashboard api
329 """
330 show_step(1)
331
332 show_step(2)
333 system_ns = 'kube-system'
334 dashboard_service = \
335 k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
336 dashboard_url = 'https://{}'.format(dashboard_service.get_ip())
337
338 def dashboard_curl(url, data=None, headers=None):
339 """ Using curl command on controller node. Alternatives:
340 - connect_{get,post}_namespaced_service_proxy_with_path -
341 k8s lib does not provide way to pass headers or POST data
342 - raw rest k8s api - need to auth somehow
343 - new load-balancer svc for dashboard + requests python lib -
344 requires working metallb or other load-balancer
345 """
346 args = ['--insecure']
347 for name in headers or {}:
348 args.append('--header')
349 args.append("{0}: {1}".format(name, headers[name]))
350 if data is not None:
351 args.append('--data')
352 args.append(data)
353 return ''.join(k8s_deployed.curl(dashboard_url + url, *args))
354
355 assert 'tokenPresent' in \
356 json.loads(dashboard_curl('/api/v1/login/status'))
357
358 show_step(3)
359 account = k8s_deployed.api.serviceaccounts.create(
360 namespace=system_ns,
361 body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
362 account.wait_secret_generation()
363
364 k8s_deployed.api.clusterrolebindings.create(
365 body=self.__read_testdata_yaml(
366 'test-admin-user-cluster-role-bind.yaml'))
367
368 account_secret = account.read().secrets[0]
369 account_token = k8s_deployed.api.secrets.get(
370 namespace=system_ns, name=account_secret.name).read().data['token']
371
372 show_step(4)
373 csrf_token = \
374 json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
375 login_headers = {'X-CSRF-TOKEN': csrf_token,
376 'Content-Type': 'application/json'}
377 jwe_token = json.loads(dashboard_curl(
378 '/api/v1/login', headers=login_headers,
379 data=json.dumps({'token': account_token.decode('base64')})
380 ))['jweToken']
381 headers = {'jweToken': jwe_token}
382
383 show_step(5)
384 dashboard_namespaces = json.loads(
385 dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']
386
387 namespaces_names_list = \
388 [ns.name for ns in k8s_deployed.api.namespaces.list()]
389 for namespace in dashboard_namespaces:
390 assert namespace['objectMeta']['name'] in namespaces_names_list
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400391
392 @pytest.mark.grap_versions
393 @pytest.mark.fail_snapshot
394 def test_k8s_ingress_nginx(self, show_step, config,
395 salt_deployed, k8s_deployed):
396 """Test ingress-nginx configured and working with metallb
397
398 Scenario:
399 1. Setup Kubernetes cluster with metallb
400 2. Create 2 example deployments and expose them
401 3. Create ingress controller with 2 backends to each deployment
402 service respectively
403 4. Wait ingress for deploy
404 5. Try to reach default endpoint
405 6. Try to reach test1 and test2 deployment services endpoints
406 """
407 show_step(1)
408 if not config.k8s_deploy.kubernetes_metallb_enabled:
409 pytest.skip("Test requires metallb addon enabled")
410 if not config.k8s_deploy.kubernetes_ingressnginx_enabled:
411 pytest.skip("Test requires ingress-nginx addon enabled")
412
413 show_step(2)
414 image = 'nginxdemos/hello:plain-text'
415 port = 80
416 dep1 = k8s_deployed.run_sample_deployment(
417 'dep-ingress-1', image=image, port=port)
418 dep2 = k8s_deployed.run_sample_deployment(
419 'dep-ingress-2', image=image, port=port)
420 svc1 = dep1.wait_ready().expose()
421 svc2 = dep2.wait_ready().expose()
422
423 show_step(3)
424 body = {
425 'apiVersion': 'extensions/v1beta1',
426 'kind': 'Ingress',
427 'metadata': {'name': 'ingress-test'},
428 'spec': {
429 'rules': [{'http': {
430 'paths': [{
431 'backend': {
432 'serviceName': svc1.name,
433 'servicePort': port},
434 'path': '/test1'}, {
435 'backend': {
436 'serviceName': svc2.name,
437 'servicePort': port},
438 'path': '/test2'
439 }]
440 }}]
441 }
442 }
443 ingress = k8s_deployed.api.ingresses.create(body=body)
444
445 show_step(4)
446 ingress.wait_ready()
447
448 show_step(5)
449 ingress_address = "https://{}".format(
450 ingress.read().status.load_balancer.ingress[0].ip)
451
452 assert requests.get(ingress_address, verify=False).status_code == 404
453
454 show_step(6)
455 req1 = requests.get(ingress_address + "/test1", verify=False)
456 assert req1.status_code == 200
457 assert 'dep-ingress-1' in req1.text
458
459 req2 = requests.get(ingress_address + "/test2", verify=False)
460 assert req2.status_code == 200
461 assert 'dep-ingress-2' in req2.text