blob: 6467a8a33d1e95d31a397bffe494c4c168514fb9 [file] [log] [blame]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +04001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
15import pytest
Vladimir Jigulin34dfa942018-07-23 21:05:48 +040016import netaddr
17import os
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040018import json
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +040019import requests
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040020
21from tcp_tests import logger
22from tcp_tests import settings
23
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040024from tcp_tests.managers.k8s import read_yaml_file
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040025from tcp_tests.managers.jenkins.client import JenkinsClient
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040026
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040027LOG = logger.logger
28
29
class TestMCPK8sActions(object):
    """Test class for different k8s actions"""

    def __read_testdata_yaml(self, name):
        """Load a YAML fixture from the testdata/k8s directory next to
        this module.

        :param name: file name of the YAML fixture to load
        :return: parsed YAML content, as produced by read_yaml_file
        """
        # Renamed the local from 'dir' — it shadowed the builtin dir().
        testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
        return read_yaml_file(testdata_dir, name)
36
Tatyana Leontovichc411ec32017-10-09 14:48:00 +030037 @pytest.mark.grab_versions
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040038 @pytest.mark.fail_snapshot
Tatyana Leontovich071ce6a2017-10-24 18:08:10 +030039 @pytest.mark.cz8116
Dennis Dmitriev0f624a82018-06-11 12:57:13 +030040 @pytest.mark.k8s_calico
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040041 def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
42 """Test externaldns integration with coredns
43
44 Scenario:
45 1. Install k8s with externaldns addon enabled(including etcd, coredns)
46 2. Start simple service
47 3. Expose deployment
48 4. Annotate service with domain name
49 5. Try to get service using nslookup
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040050 6. Delete service and deployment
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040051 """
52
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040053 show_step(1)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040054 if not (config.k8s_deploy.kubernetes_externaldns_enabled and
55 config.k8s_deploy.kubernetes_coredns_enabled):
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040056 pytest.skip("Test requires externaldns and coredns addons enabled")
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040057
58 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040059 deployment = k8s_deployed.run_sample_deployment('test-dep')
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040060
61 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040062 svc = deployment.expose()
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040063
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040064 show_step(4)
Vladimir Jigulin7eb41b02018-10-24 17:03:51 +040065 hostname = "test.{0}.".format(settings.DOMAIN_NAME)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040066 svc.patch({
67 "metadata": {
68 "annotations": {
69 "external-dns.alpha.kubernetes.io/hostname": hostname
70 }
71 }
72 })
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040073
74 show_step(5)
Vladimir Jigulin90689152018-09-26 15:38:19 +040075 dns_svc = k8s_deployed.api.services.get(
76 name='coredns', namespace='kube-system')
77 k8s_deployed.nslookup(hostname, dns_svc.get_ip())
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040078
79 show_step(6)
80 deployment.delete()
Victor Ryzhenkin87a31422018-03-16 22:25:27 +040081
82 @pytest.mark.grab_versions
83 @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
84 'cncf_results.tar.gz'])
85 @pytest.mark.fail_snapshot
86 def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
Vladimir Jigulin0c8dd5a2018-08-28 05:08:35 +040087 k8s_cncf_log_helper):
Victor Ryzhenkin87a31422018-03-16 22:25:27 +040088 """Run cncf e2e suite and provide files needed for pull request
89 to the CNCF repo
90
91 Scenario:
92 1. Run cncf from https://github.com/cncf/k8s-conformance
93 """
94
95 show_step(1)
96 k8s_deployed.start_k8s_cncf_verification()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040097
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040098 @pytest.mark.grab_versions
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040099 @pytest.mark.fail_snapshot
100 def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
101 k8s_chain_update_log_helper):
102 """Test for chain-upgrading k8s hypercube pool and checking it
103
104 Scenario:
105 1. Prepare salt on hosts
106 2. Setup controller nodes
107 3. Setup compute nodes
108 4. Setup Kubernetes cluster
109 5. Run and expose sample test service
110 6. Run conformance to check consistency
111 7. For every version in update chain:
112 Update cluster to new version, check test sample service
113 availability, run conformance
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400114 8. Delete service and deployment
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400115 """
116
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400117 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400118 sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400119 sample.expose()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400120 sample.wait_ready()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400121
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400122 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400123
124 show_step(6)
125 k8s_deployed.run_conformance(log_out="k8s_conformance.log")
126
127 show_step(7)
128 chain_versions = config.k8s.k8s_update_chain.split(" ")
129 for version in chain_versions:
130 LOG.info("Chain update to '{}' version".format(version))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400131 k8s_deployed.update_k8s_version(version)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400132
133 LOG.info("Checking test service availability")
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400134 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400135
136 LOG.info("Running conformance on {} version".format(version))
137 log_name = "k8s_conformance_{}.log".format(version)
138 k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400139
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400140 assert sample.is_service_available()
141
142 show_step(8)
143 sample.delete()
144
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400145 @pytest.mark.grab_versions
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400146 @pytest.mark.fail_snapshot
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200147 @pytest.mark.k8s_metallb
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400148 def test_k8s_metallb(self, show_step, config, k8s_deployed):
149 """Enable metallb in cluster and do basic tests
150
151 Scenario:
152 1. Setup Kubernetes cluster with enabled metallb
153 2. Check that metallb pods created in metallb-system namespace
154 3. Run 5 sample deployments
155 4. Expose deployments with type=LoadBalancer
156 5. Check services availability from outside of cluster
157 6. Run conformance
158 7. Check services availability from outside of cluster
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400159 8. Delete deployments
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400160 """
161 show_step(1)
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200162 if not k8s_deployed.is_metallb_enabled:
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400163 pytest.skip("Test requires metallb addon enabled")
164
165 show_step(2)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400166 ns = "metallb-system"
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400167 assert \
168 len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
169 assert \
170 len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400171
172 show_step(3)
173 samples = []
174 for i in range(5):
175 name = 'test-dep-metallb-{}'.format(i)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400176 samples.append(k8s_deployed.run_sample_deployment(name))
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400177
178 show_step(4)
179 for sample in samples:
180 sample.expose('LoadBalancer')
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400181 sample.wait_ready()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400182
183 show_step(5)
184 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400185 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400186 assert sample.is_service_available(external=True)
187
188 show_step(6)
189 k8s_deployed.run_conformance()
190
191 show_step(7)
192 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400193 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400194 assert sample.is_service_available(external=True)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400195
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400196 show_step(8)
197 for sample in samples:
198 sample.delete()
199
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400200 @pytest.mark.grab_versions
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400201 @pytest.mark.fail_snapshot
Dennis Dmitrievee5ef232018-08-31 13:53:18 +0300202 @pytest.mark.k8s_genie
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400203 def test_k8s_genie_flannel(self, show_step, config,
204 salt_deployed, k8s_deployed):
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400205 """Test genie-cni+flannel cni setup
206
207 Scenario:
208 1. Setup Kubernetes cluster with genie cni and flannel
209 2. Check that flannel pods created in kube-system namespace
210 3. Create sample deployment with flannel cni annotation
211 4. Check that the deployment have 1 ip addresses from cni provider
212 5. Create sample deployment with calico cni annotation
213 6. Check that the deployment have 1 ip addresses from cni provider
214 7. Create sample deployment with multi-cni annotation
215 8. Check that the deployment have 2 ip addresses from different
216 cni providers
217 9. Create sample deployment without cni annotation
218 10. Check that the deployment have 1 ip address
219 11. Check pods availability
220 12. Run conformance
221 13. Check pods availability
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400222 14. Delete pods
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400223 """
224 show_step(1)
225
226 # Find out calico and flannel networks
Victor Ryzhenkin0c373822018-10-30 17:55:50 +0400227 tgt_k8s_control = "I@kubernetes:master"
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400228
229 flannel_pillar = salt_deployed.get_pillar(
230 tgt=tgt_k8s_control,
231 pillar="kubernetes:master:network:flannel:private_ip_range")[0]
232 flannel_network = netaddr.IPNetwork(flannel_pillar.values()[0])
233 LOG.info("Flannel network: {}".format(flannel_network))
234
235 calico_network_pillar = salt_deployed.get_pillar(
236 tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
237 calico_netmask_pillar = salt_deployed.get_pillar(
238 tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
239 calico_network = netaddr.IPNetwork(
240 "{0}/{1}".format(calico_network_pillar.values()[0],
241 calico_netmask_pillar.values()[0]))
242 LOG.info("Calico network: {}".format(calico_network))
243
244 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400245 assert k8s_deployed.api.pods.list(
246 namespace="kube-system", name_prefix="kube-flannel-") > 0
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400247
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400248 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400249 flannel_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400250 body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400251 flannel_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400252
253 show_step(4)
254 flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
255 assert len(flannel_ips) == 1
256 assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
257
258 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400259 calico_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400260 body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400261 calico_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400262
263 show_step(6)
264 calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
265 assert len(calico_ips) == 1
266 assert netaddr.IPAddress(calico_ips[0]) in calico_network
267
268 show_step(7)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400269 multicni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400270 body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400271 multicni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400272
273 show_step(8)
274 multicni_ips = \
275 k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
276 assert len(multicni_ips) == 2
277 for net in [calico_network, flannel_network]:
278 assert netaddr.IPAddress(multicni_ips[0]) in net or \
279 netaddr.IPAddress(multicni_ips[1]) in net
280
281 show_step(9)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400282 nocni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400283 body=self.__read_testdata_yaml('pod-sample.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400284 nocni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400285
286 show_step(10)
287 nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
288 assert len(nocni_ips) == 1
289 assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
290 netaddr.IPAddress(nocni_ips[0]) in flannel_network)
291
292 show_step(11)
293
294 def check_pod_availability(ip):
295 assert "Hello Kubernetes!" in k8s_deployed.curl(
296 "http://{}:8080".format(ip))
297
298 def check_pods_availability():
299 check_pod_availability(flannel_ips[0])
300 check_pod_availability(calico_ips[0])
301 check_pod_availability(multicni_ips[0])
302 check_pod_availability(multicni_ips[1])
303 check_pod_availability(nocni_ips[0])
304
305 check_pods_availability()
306
307 show_step(12)
308 k8s_deployed.run_conformance()
309
310 show_step(13)
311 check_pods_availability()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400312
313 show_step(14)
314 flannel_pod.delete()
315 calico_pod.delete()
316 multicni_pod.delete()
317 nocni_pod.delete()
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400318
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.k8s_dashboard
    def test_k8s_dashboard(self, show_step, config,
                           salt_deployed, k8s_deployed):
        """Test dashboard setup

        Scenario:
            1. Setup Kubernetes cluster
            2. Try to curl login status api
            3. Create a test-admin-user account
            4. Try to login in dashboard using test-admin-user account
            5. Get and check list of namespaces using dashboard api
        """
        show_step(1)

        show_step(2)
        system_ns = 'kube-system'
        dashboard_service = \
            k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
        dashboard_url = 'https://{}'.format(dashboard_service.get_ip())

        def dashboard_curl(url, data=None, headers=None):
            """ Using curl command on controller node. Alternatives:
                - connect_{get,post}_namespaced_service_proxy_with_path -
                k8s lib does not provide way to pass headers or POST data
                - raw rest k8s api - need to auth somehow
                - new load-balancer svc for dashboard + requests python lib -
                requires working metallb or other load-balancer
            """
            # --insecure: the dashboard serves a self-signed certificate
            args = ['--insecure']
            for name in headers or {}:
                args.append('--header')
                args.append("{0}: {1}".format(name, headers[name]))
            if data is not None:
                # Passing --data makes curl issue a POST request
                args.append('--data')
                args.append(data)
            # k8s_deployed.curl returns the response body in chunks;
            # join them into a single string for json.loads below
            return ''.join(k8s_deployed.curl(dashboard_url + url, *args))

        # Sanity check: the login-status endpoint must mention token auth
        assert 'tokenPresent' in \
            json.loads(dashboard_curl('/api/v1/login/status'))

        show_step(3)
        account = k8s_deployed.api.serviceaccounts.create(
            namespace=system_ns,
            body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
        # Wait until k8s generates the service-account token secret
        account.wait_secret_generation()

        # Bind cluster-admin rights to the test account so it can list
        # namespaces through the dashboard below
        k8s_deployed.api.clusterrolebindings.create(
            body=self.__read_testdata_yaml(
                'test-admin-user-cluster-role-bind.yaml'))

        # The secret's 'token' field holds the base64-encoded bearer token
        account_secret = account.read().secrets[0]
        account_token = k8s_deployed.api.secrets.get(
            namespace=system_ns, name=account_secret.name).read().data['token']

        show_step(4)
        # Dashboard login is a two-step flow: fetch a CSRF token, then POST
        # the (decoded) service-account token to receive a JWE session token
        csrf_token = \
            json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
        login_headers = {'X-CSRF-TOKEN': csrf_token,
                         'Content-Type': 'application/json'}
        # NOTE: str.decode('base64') is Python2-only
        jwe_token = json.loads(dashboard_curl(
            '/api/v1/login', headers=login_headers,
            data=json.dumps({'token': account_token.decode('base64')})
        ))['jweToken']
        headers = {'jweToken': jwe_token}

        show_step(5)
        dashboard_namespaces = json.loads(
            dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']

        # Every namespace reported by the dashboard must exist in the
        # cluster according to the raw k8s API
        namespaces_names_list = \
            [ns.name for ns in k8s_deployed.api.namespaces.list()]
        for namespace in dashboard_namespaces:
            assert namespace['objectMeta']['name'] in namespaces_names_list
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400394
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400395 @pytest.mark.grab_versions
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400396 @pytest.mark.fail_snapshot
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200397 @pytest.mark.k8s_ingress_nginx
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400398 def test_k8s_ingress_nginx(self, show_step, config,
399 salt_deployed, k8s_deployed):
400 """Test ingress-nginx configured and working with metallb
401
402 Scenario:
403 1. Setup Kubernetes cluster with metallb
404 2. Create 2 example deployments and expose them
405 3. Create ingress controller with 2 backends to each deployment
406 service respectively
407 4. Wait ingress for deploy
408 5. Try to reach default endpoint
409 6. Try to reach test1 and test2 deployment services endpoints
410 """
411 show_step(1)
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200412 if not k8s_deployed.is_metallb_enabled:
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400413 pytest.skip("Test requires metallb addon enabled")
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200414 if not k8s_deployed.is_ingress_nginx_enabled:
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400415 pytest.skip("Test requires ingress-nginx addon enabled")
416
417 show_step(2)
418 image = 'nginxdemos/hello:plain-text'
419 port = 80
420 dep1 = k8s_deployed.run_sample_deployment(
421 'dep-ingress-1', image=image, port=port)
422 dep2 = k8s_deployed.run_sample_deployment(
423 'dep-ingress-2', image=image, port=port)
424 svc1 = dep1.wait_ready().expose()
425 svc2 = dep2.wait_ready().expose()
426
427 show_step(3)
428 body = {
429 'apiVersion': 'extensions/v1beta1',
430 'kind': 'Ingress',
431 'metadata': {'name': 'ingress-test'},
432 'spec': {
433 'rules': [{'http': {
434 'paths': [{
435 'backend': {
436 'serviceName': svc1.name,
437 'servicePort': port},
438 'path': '/test1'}, {
439 'backend': {
440 'serviceName': svc2.name,
441 'servicePort': port},
442 'path': '/test2'
443 }]
444 }}]
445 }
446 }
447 ingress = k8s_deployed.api.ingresses.create(body=body)
448
449 show_step(4)
450 ingress.wait_ready()
451
452 show_step(5)
453 ingress_address = "https://{}".format(
454 ingress.read().status.load_balancer.ingress[0].ip)
455
456 assert requests.get(ingress_address, verify=False).status_code == 404
457
458 show_step(6)
459 req1 = requests.get(ingress_address + "/test1", verify=False)
460 assert req1.status_code == 200
461 assert 'dep-ingress-1' in req1.text
462
463 req2 = requests.get(ingress_address + "/test2", verify=False)
464 assert req2.status_code == 200
465 assert 'dep-ingress-2' in req2.text
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400466
467 @pytest.mark.grab_versions
468 @pytest.mark.fail_snapshot
469 def test_k8s_cicd_upgrade(self, show_step, config,
470 salt_deployed, k8s_deployed):
471 """Test k8s upgrade cicd pipeline
472
473 Scenario:
474 1. Setup Kubernetes+CICD cluster
475 2. Start deploy-k8s-upgrade job in jenkins
476 3. Wait for job to end
477 """
478 show_step(1)
479 jenkins_info = salt_deployed.get_pillar(
480 tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]
481
482 salt_api = salt_deployed.get_pillar(
483 tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]
484
485 show_step(2)
486 jenkins = JenkinsClient(
487 host='http://{host}:{port}'.format(**jenkins_info),
488 username=jenkins_info['username'],
489 password=jenkins_info['password'])
490
491 params = jenkins.make_defults_params('deploy-k8s-upgrade')
492 params['SALT_MASTER_URL'] = salt_api
493 params['SALT_MASTER_CREDENTIALS'] = 'salt'
494 params['CONFORMANCE_RUN_AFTER'] = True
495 params['CONFORMANCE_RUN_BEFORE'] = True
496 build = jenkins.run_build('deploy-k8s-upgrade', params)
497
498 show_step(3)
499 jenkins.wait_end_of_build(
500 name=build[0], build_id=build[1], timeout=3600 * 4)
501 result = jenkins.build_info(
502 name=build[0], build_id=build[1])['result']
503 assert result == 'SUCCESS', "k8s upgrade job has been failed"