blob: c93e152b12c7b71f382d8eecdd410f9a094c19c5 [file] [log] [blame]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +04001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
15import pytest
Vladimir Jigulin34dfa942018-07-23 21:05:48 +040016import netaddr
17import os
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040018import json
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +040019import requests
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040020
21from tcp_tests import logger
22from tcp_tests import settings
23
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040024from tcp_tests.managers.k8s import read_yaml_file
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040025from tcp_tests.managers.jenkins.client import JenkinsClient
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040026
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040027LOG = logger.logger
28
29
30class TestMCPK8sActions(object):
31 """Test class for different k8s actions"""
32
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040033 def __read_testdata_yaml(self, name):
34 dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
35 return read_yaml_file(dir, name)
36
Tatyana Leontovichc411ec32017-10-09 14:48:00 +030037 @pytest.mark.grab_versions
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040038 @pytest.mark.fail_snapshot
Tatyana Leontovich071ce6a2017-10-24 18:08:10 +030039 @pytest.mark.cz8116
Dennis Dmitriev0f624a82018-06-11 12:57:13 +030040 @pytest.mark.k8s_calico
Vladimir Jigulineb8b8132019-03-19 15:34:02 +040041 @pytest.mark.k8s_system
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040042 def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
43 """Test externaldns integration with coredns
44
45 Scenario:
46 1. Install k8s with externaldns addon enabled(including etcd, coredns)
47 2. Start simple service
48 3. Expose deployment
49 4. Annotate service with domain name
50 5. Try to get service using nslookup
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040051 6. Delete service and deployment
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040052 """
53
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040054 show_step(1)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040055 if not (config.k8s_deploy.kubernetes_externaldns_enabled and
56 config.k8s_deploy.kubernetes_coredns_enabled):
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040057 pytest.skip("Test requires externaldns and coredns addons enabled")
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040058
59 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040060 deployment = k8s_deployed.run_sample_deployment('test-dep')
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040061
62 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040063 svc = deployment.expose()
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040064
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040065 show_step(4)
Vladimir Jigulin7eb41b02018-10-24 17:03:51 +040066 hostname = "test.{0}.".format(settings.DOMAIN_NAME)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040067 svc.patch({
68 "metadata": {
69 "annotations": {
70 "external-dns.alpha.kubernetes.io/hostname": hostname
71 }
72 }
73 })
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040074
75 show_step(5)
Vladimir Jigulin90689152018-09-26 15:38:19 +040076 dns_svc = k8s_deployed.api.services.get(
77 name='coredns', namespace='kube-system')
78 k8s_deployed.nslookup(hostname, dns_svc.get_ip())
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040079
80 show_step(6)
81 deployment.delete()
Victor Ryzhenkin87a31422018-03-16 22:25:27 +040082
    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    k8s_cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance
        """

        # NOTE(review): the k8s_cncf_log_helper fixture presumably collects
        # the artifact files listed in the cncf_publisher mark — confirm in
        # the fixture definition.
        show_step(1)
        k8s_deployed.start_k8s_cncf_verification()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040098
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040099 @pytest.mark.grab_versions
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400100 @pytest.mark.fail_snapshot
101 def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
102 k8s_chain_update_log_helper):
103 """Test for chain-upgrading k8s hypercube pool and checking it
104
105 Scenario:
106 1. Prepare salt on hosts
107 2. Setup controller nodes
108 3. Setup compute nodes
109 4. Setup Kubernetes cluster
110 5. Run and expose sample test service
111 6. Run conformance to check consistency
112 7. For every version in update chain:
113 Update cluster to new version, check test sample service
114 availability, run conformance
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400115 8. Delete service and deployment
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400116 """
117
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400118 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400119 sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400120 sample.expose()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400121 sample.wait_ready()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400122
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400123 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400124
125 show_step(6)
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400126 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400127
128 show_step(7)
129 chain_versions = config.k8s.k8s_update_chain.split(" ")
130 for version in chain_versions:
131 LOG.info("Chain update to '{}' version".format(version))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400132 k8s_deployed.update_k8s_version(version)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400133
134 LOG.info("Checking test service availability")
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400135 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400136
137 LOG.info("Running conformance on {} version".format(version))
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400138 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400139
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400140 assert sample.is_service_available()
141
142 show_step(8)
143 sample.delete()
144
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400145 @pytest.mark.grab_versions
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400146 @pytest.mark.fail_snapshot
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200147 @pytest.mark.k8s_metallb
Vladimir Jigulineb8b8132019-03-19 15:34:02 +0400148 @pytest.mark.k8s_system
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400149 def test_k8s_metallb(self, show_step, config, k8s_deployed):
150 """Enable metallb in cluster and do basic tests
151
152 Scenario:
153 1. Setup Kubernetes cluster with enabled metallb
154 2. Check that metallb pods created in metallb-system namespace
155 3. Run 5 sample deployments
156 4. Expose deployments with type=LoadBalancer
157 5. Check services availability from outside of cluster
158 6. Run conformance
159 7. Check services availability from outside of cluster
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400160 8. Delete deployments
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400161 """
162 show_step(1)
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200163 if not k8s_deployed.is_metallb_enabled:
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400164 pytest.skip("Test requires metallb addon enabled")
165
166 show_step(2)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400167 ns = "metallb-system"
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400168 assert \
169 len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
170 assert \
171 len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400172
173 show_step(3)
174 samples = []
175 for i in range(5):
176 name = 'test-dep-metallb-{}'.format(i)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400177 samples.append(k8s_deployed.run_sample_deployment(name))
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400178
179 show_step(4)
180 for sample in samples:
181 sample.expose('LoadBalancer')
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400182 sample.wait_ready()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400183
184 show_step(5)
185 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400186 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400187 assert sample.is_service_available(external=True)
188
189 show_step(6)
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400190 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400191
192 show_step(7)
193 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400194 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400195 assert sample.is_service_available(external=True)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400196
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400197 show_step(8)
198 for sample in samples:
199 sample.delete()
200
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400201 @pytest.mark.grab_versions
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400202 @pytest.mark.fail_snapshot
Dennis Dmitrievee5ef232018-08-31 13:53:18 +0300203 @pytest.mark.k8s_genie
Vladimir Jigulineb8b8132019-03-19 15:34:02 +0400204 @pytest.mark.k8s_system
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400205 def test_k8s_genie_flannel(self, show_step, config,
206 salt_deployed, k8s_deployed):
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400207 """Test genie-cni+flannel cni setup
208
209 Scenario:
210 1. Setup Kubernetes cluster with genie cni and flannel
211 2. Check that flannel pods created in kube-system namespace
212 3. Create sample deployment with flannel cni annotation
213 4. Check that the deployment have 1 ip addresses from cni provider
214 5. Create sample deployment with calico cni annotation
215 6. Check that the deployment have 1 ip addresses from cni provider
216 7. Create sample deployment with multi-cni annotation
217 8. Check that the deployment have 2 ip addresses from different
218 cni providers
219 9. Create sample deployment without cni annotation
220 10. Check that the deployment have 1 ip address
221 11. Check pods availability
222 12. Run conformance
223 13. Check pods availability
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400224 14. Delete pods
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400225 """
226 show_step(1)
227
228 # Find out calico and flannel networks
Victor Ryzhenkin0c373822018-10-30 17:55:50 +0400229 tgt_k8s_control = "I@kubernetes:master"
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400230
231 flannel_pillar = salt_deployed.get_pillar(
232 tgt=tgt_k8s_control,
233 pillar="kubernetes:master:network:flannel:private_ip_range")[0]
234 flannel_network = netaddr.IPNetwork(flannel_pillar.values()[0])
235 LOG.info("Flannel network: {}".format(flannel_network))
236
237 calico_network_pillar = salt_deployed.get_pillar(
238 tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
239 calico_netmask_pillar = salt_deployed.get_pillar(
240 tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
241 calico_network = netaddr.IPNetwork(
242 "{0}/{1}".format(calico_network_pillar.values()[0],
243 calico_netmask_pillar.values()[0]))
244 LOG.info("Calico network: {}".format(calico_network))
245
246 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400247 assert k8s_deployed.api.pods.list(
248 namespace="kube-system", name_prefix="kube-flannel-") > 0
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400249
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400250 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400251 flannel_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400252 body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400253 flannel_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400254
255 show_step(4)
256 flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
257 assert len(flannel_ips) == 1
258 assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
259
260 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400261 calico_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400262 body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400263 calico_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400264
265 show_step(6)
266 calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
267 assert len(calico_ips) == 1
268 assert netaddr.IPAddress(calico_ips[0]) in calico_network
269
270 show_step(7)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400271 multicni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400272 body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400273 multicni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400274
275 show_step(8)
276 multicni_ips = \
277 k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
278 assert len(multicni_ips) == 2
279 for net in [calico_network, flannel_network]:
280 assert netaddr.IPAddress(multicni_ips[0]) in net or \
281 netaddr.IPAddress(multicni_ips[1]) in net
282
283 show_step(9)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400284 nocni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400285 body=self.__read_testdata_yaml('pod-sample.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400286 nocni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400287
288 show_step(10)
289 nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
290 assert len(nocni_ips) == 1
291 assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
292 netaddr.IPAddress(nocni_ips[0]) in flannel_network)
293
294 show_step(11)
295
296 def check_pod_availability(ip):
297 assert "Hello Kubernetes!" in k8s_deployed.curl(
298 "http://{}:8080".format(ip))
299
300 def check_pods_availability():
301 check_pod_availability(flannel_ips[0])
302 check_pod_availability(calico_ips[0])
303 check_pod_availability(multicni_ips[0])
304 check_pod_availability(multicni_ips[1])
305 check_pod_availability(nocni_ips[0])
306
307 check_pods_availability()
308
309 show_step(12)
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400310 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400311
312 show_step(13)
313 check_pods_availability()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400314
315 show_step(14)
316 flannel_pod.delete()
317 calico_pod.delete()
318 multicni_pod.delete()
319 nocni_pod.delete()
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400320
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.k8s_dashboard
    @pytest.mark.k8s_system
    def test_k8s_dashboard(self, show_step, config,
                           salt_deployed, k8s_deployed):
        """Test dashboard setup

        Scenario:
            1. Setup Kubernetes cluster
            2. Try to curl login status api
            3. Create a test-admin-user account
            4. Try to login in dashboard using test-admin-user account
            5. Get and check list of namespaces using dashboard api
        """
        # Step 1 is performed by the k8s_deployed fixture.
        show_step(1)

        show_step(2)
        system_ns = 'kube-system'
        dashboard_service = \
            k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
        dashboard_url = 'https://{}'.format(dashboard_service.get_ip())

        def dashboard_curl(url, data=None, headers=None):
            """ Using curl command on controller node. Alternatives:
                - connect_{get,post}_namespaced_service_proxy_with_path -
                    k8s lib does not provide way to pass headers or POST data
                - raw rest k8s api - need to auth somehow
                - new load-balancer svc for dashboard + requests python lib -
                    requires working metallb or other load-balancer
            """
            # --insecure: the dashboard uses a self-signed certificate.
            args = ['--insecure']
            for name in headers or {}:
                args.append('--header')
                args.append("{0}: {1}".format(name, headers[name]))
            if data is not None:
                args.append('--data')
                args.append(data)
            # curl() yields output chunks; join them into one string.
            return ''.join(k8s_deployed.curl(dashboard_url + url, *args))

        # Sanity check: the login status endpoint answers with JSON that
        # contains a 'tokenPresent' field.
        assert 'tokenPresent' in \
            json.loads(dashboard_curl('/api/v1/login/status'))

        show_step(3)
        account = k8s_deployed.api.serviceaccounts.create(
            namespace=system_ns,
            body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
        # Wait until k8s generates the service account's token secret.
        account.wait_secret_generation()

        # Bind the account to cluster-admin so it can list all namespaces.
        k8s_deployed.api.clusterrolebindings.create(
            body=self.__read_testdata_yaml(
                'test-admin-user-cluster-role-bind.yaml'))

        account_secret = account.read().secrets[0]
        # Secret data values are base64-encoded by the k8s API.
        account_token = k8s_deployed.api.secrets.get(
            namespace=system_ns, name=account_secret.name).read().data['token']

        show_step(4)
        # The dashboard login API requires a CSRF token header.
        csrf_token = \
            json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
        login_headers = {'X-CSRF-TOKEN': csrf_token,
                         'Content-Type': 'application/json'}
        # NOTE(review): str.decode('base64') is Python 2 only; a py3 port
        # would need base64.b64decode() here.
        jwe_token = json.loads(dashboard_curl(
            '/api/v1/login', headers=login_headers,
            data=json.dumps({'token': account_token.decode('base64')})
        ))['jweToken']
        headers = {'jweToken': jwe_token}

        show_step(5)
        dashboard_namespaces = json.loads(
            dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']

        # Every namespace reported by the dashboard must exist in the
        # cluster's own namespace list.
        namespaces_names_list = \
            [ns.name for ns in k8s_deployed.api.namespaces.list()]
        for namespace in dashboard_namespaces:
            assert namespace['objectMeta']['name'] in namespaces_names_list
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400397
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400398 @pytest.mark.grab_versions
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400399 @pytest.mark.fail_snapshot
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200400 @pytest.mark.k8s_ingress_nginx
Vladimir Jigulineb8b8132019-03-19 15:34:02 +0400401 @pytest.mark.k8s_system
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400402 def test_k8s_ingress_nginx(self, show_step, config,
403 salt_deployed, k8s_deployed):
404 """Test ingress-nginx configured and working with metallb
405
406 Scenario:
407 1. Setup Kubernetes cluster with metallb
408 2. Create 2 example deployments and expose them
409 3. Create ingress controller with 2 backends to each deployment
410 service respectively
411 4. Wait ingress for deploy
412 5. Try to reach default endpoint
413 6. Try to reach test1 and test2 deployment services endpoints
414 """
415 show_step(1)
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200416 if not k8s_deployed.is_metallb_enabled:
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400417 pytest.skip("Test requires metallb addon enabled")
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200418 if not k8s_deployed.is_ingress_nginx_enabled:
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400419 pytest.skip("Test requires ingress-nginx addon enabled")
420
421 show_step(2)
422 image = 'nginxdemos/hello:plain-text'
423 port = 80
424 dep1 = k8s_deployed.run_sample_deployment(
425 'dep-ingress-1', image=image, port=port)
426 dep2 = k8s_deployed.run_sample_deployment(
427 'dep-ingress-2', image=image, port=port)
428 svc1 = dep1.wait_ready().expose()
429 svc2 = dep2.wait_ready().expose()
430
431 show_step(3)
432 body = {
433 'apiVersion': 'extensions/v1beta1',
434 'kind': 'Ingress',
435 'metadata': {'name': 'ingress-test'},
436 'spec': {
437 'rules': [{'http': {
438 'paths': [{
439 'backend': {
440 'serviceName': svc1.name,
441 'servicePort': port},
442 'path': '/test1'}, {
443 'backend': {
444 'serviceName': svc2.name,
445 'servicePort': port},
446 'path': '/test2'
447 }]
448 }}]
449 }
450 }
451 ingress = k8s_deployed.api.ingresses.create(body=body)
452
453 show_step(4)
454 ingress.wait_ready()
455
456 show_step(5)
457 ingress_address = "https://{}".format(
458 ingress.read().status.load_balancer.ingress[0].ip)
459
460 assert requests.get(ingress_address, verify=False).status_code == 404
461
462 show_step(6)
463 req1 = requests.get(ingress_address + "/test1", verify=False)
464 assert req1.status_code == 200
465 assert 'dep-ingress-1' in req1.text
466
467 req2 = requests.get(ingress_address + "/test2", verify=False)
468 assert req2.status_code == 200
469 assert 'dep-ingress-2' in req2.text
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400470
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_cicd_upgrade(self, show_step, config,
                              salt_deployed, k8s_deployed):
        """Test k8s upgrade cicd pipeline

        Scenario:
            1. Setup Kubernetes+CICD cluster
            2. Start deploy-k8s-upgrade job in jenkins
            3. Wait for job to end
        """
        show_step(1)
        # NOTE: dict.values()[0] is Python 2 only; these pillar calls return
        # a list of {minion_id: value} dicts and we take the first value.
        jenkins_info = salt_deployed.get_pillar(
            tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]

        salt_api = salt_deployed.get_pillar(
            tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]

        show_step(2)
        jenkins = JenkinsClient(
            host='http://{host}:{port}'.format(**jenkins_info),
            username=jenkins_info['username'],
            password=jenkins_info['password'])

        # Start from the job's default parameters and override what the
        # upgrade pipeline needs; run conformance both before and after.
        params = jenkins.make_defults_params('deploy-k8s-upgrade')
        params['SALT_MASTER_URL'] = salt_api
        params['SALT_MASTER_CREDENTIALS'] = 'salt'
        params['CONFORMANCE_RUN_AFTER'] = True
        params['CONFORMANCE_RUN_BEFORE'] = True
        build = jenkins.run_build('deploy-k8s-upgrade', params)

        show_step(3)
        # build is (job_name, build_id); allow up to 4 hours for the upgrade.
        jenkins.wait_end_of_build(
            name=build[0], build_id=build[1], timeout=3600 * 4)
        result = jenkins.build_info(
            name=build[0], build_id=build[1])['result']
        assert result == 'SUCCESS', "k8s upgrade job has been failed"