blob: c6d61dd4891f92d755da3827cebff8c69d8943d5 [file] [log] [blame]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +04001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
15import pytest
Vladimir Jigulin34dfa942018-07-23 21:05:48 +040016import netaddr
17import os
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040018import json
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +040019import requests
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040020
21from tcp_tests import logger
22from tcp_tests import settings
23
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040024from tcp_tests.managers.k8s import read_yaml_file
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040025from tcp_tests.managers.jenkins.client import JenkinsClient
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040026
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040027LOG = logger.logger
28
29
30class TestMCPK8sActions(object):
31 """Test class for different k8s actions"""
32
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040033 def __read_testdata_yaml(self, name):
34 dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
35 return read_yaml_file(dir, name)
36
Tatyana Leontovichc411ec32017-10-09 14:48:00 +030037 @pytest.mark.grab_versions
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040038 @pytest.mark.fail_snapshot
Tatyana Leontovich071ce6a2017-10-24 18:08:10 +030039 @pytest.mark.cz8116
Dennis Dmitriev0f624a82018-06-11 12:57:13 +030040 @pytest.mark.k8s_calico
Vladimir Jigulineb8b8132019-03-19 15:34:02 +040041 @pytest.mark.k8s_system
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040042 def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
43 """Test externaldns integration with coredns
44
45 Scenario:
46 1. Install k8s with externaldns addon enabled(including etcd, coredns)
47 2. Start simple service
48 3. Expose deployment
49 4. Annotate service with domain name
50 5. Try to get service using nslookup
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040051 6. Delete service and deployment
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040052 """
53
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040054 show_step(1)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040055 if not (config.k8s_deploy.kubernetes_externaldns_enabled and
56 config.k8s_deploy.kubernetes_coredns_enabled):
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040057 pytest.skip("Test requires externaldns and coredns addons enabled")
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040058
59 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040060 deployment = k8s_deployed.run_sample_deployment('test-dep')
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040061
62 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040063 svc = deployment.expose()
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040064
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040065 show_step(4)
Vladimir Jigulin7eb41b02018-10-24 17:03:51 +040066 hostname = "test.{0}.".format(settings.DOMAIN_NAME)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040067 svc.patch({
68 "metadata": {
69 "annotations": {
70 "external-dns.alpha.kubernetes.io/hostname": hostname
71 }
72 }
73 })
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040074
75 show_step(5)
Vladimir Jigulin90689152018-09-26 15:38:19 +040076 dns_svc = k8s_deployed.api.services.get(
77 name='coredns', namespace='kube-system')
78 k8s_deployed.nslookup(hostname, dns_svc.get_ip())
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040079
80 show_step(6)
81 deployment.delete()
Victor Ryzhenkin87a31422018-03-16 22:25:27 +040082
    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    k8s_cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance
        """

        show_step(1)
        # Long-running conformance run; the artifacts named in the
        # cncf_publisher marker are presumably gathered by the
        # k8s_cncf_log_helper fixture after the run — TODO confirm.
        k8s_deployed.start_k8s_cncf_verification()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040098
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040099 @pytest.mark.grab_versions
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400100 @pytest.mark.fail_snapshot
101 def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
102 k8s_chain_update_log_helper):
103 """Test for chain-upgrading k8s hypercube pool and checking it
104
105 Scenario:
106 1. Prepare salt on hosts
107 2. Setup controller nodes
108 3. Setup compute nodes
109 4. Setup Kubernetes cluster
110 5. Run and expose sample test service
111 6. Run conformance to check consistency
112 7. For every version in update chain:
113 Update cluster to new version, check test sample service
114 availability, run conformance
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400115 8. Delete service and deployment
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400116 """
117
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400118 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400119 sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400120 sample.expose()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400121 sample.wait_ready()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400122
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400123 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400124
125 show_step(6)
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400126 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400127
128 show_step(7)
129 chain_versions = config.k8s.k8s_update_chain.split(" ")
130 for version in chain_versions:
131 LOG.info("Chain update to '{}' version".format(version))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400132 k8s_deployed.update_k8s_version(version)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400133
134 LOG.info("Checking test service availability")
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400135 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400136
137 LOG.info("Running conformance on {} version".format(version))
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400138 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400139
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400140 assert sample.is_service_available()
141
142 show_step(8)
143 sample.delete()
144
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400145 @pytest.mark.grab_versions
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400146 @pytest.mark.fail_snapshot
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200147 @pytest.mark.k8s_metallb
Vladimir Jigulineb8b8132019-03-19 15:34:02 +0400148 @pytest.mark.k8s_system
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400149 def test_k8s_metallb(self, show_step, config, k8s_deployed):
150 """Enable metallb in cluster and do basic tests
151
152 Scenario:
153 1. Setup Kubernetes cluster with enabled metallb
154 2. Check that metallb pods created in metallb-system namespace
155 3. Run 5 sample deployments
156 4. Expose deployments with type=LoadBalancer
157 5. Check services availability from outside of cluster
158 6. Run conformance
159 7. Check services availability from outside of cluster
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400160 8. Delete deployments
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400161 """
162 show_step(1)
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200163 if not k8s_deployed.is_metallb_enabled:
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400164 pytest.skip("Test requires metallb addon enabled")
165
166 show_step(2)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400167 ns = "metallb-system"
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400168 assert \
169 len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
170 assert \
171 len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400172
173 show_step(3)
174 samples = []
175 for i in range(5):
176 name = 'test-dep-metallb-{}'.format(i)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400177 samples.append(k8s_deployed.run_sample_deployment(name))
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400178
179 show_step(4)
180 for sample in samples:
181 sample.expose('LoadBalancer')
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400182 sample.wait_ready()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400183
184 show_step(5)
185 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400186 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400187 assert sample.is_service_available(external=True)
188
189 show_step(6)
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400190 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400191
192 show_step(7)
193 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400194 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400195 assert sample.is_service_available(external=True)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400196
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400197 show_step(8)
198 for sample in samples:
199 sample.delete()
200
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400201 @pytest.mark.grab_versions
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400202 @pytest.mark.fail_snapshot
Dennis Dmitrievee5ef232018-08-31 13:53:18 +0300203 @pytest.mark.k8s_genie
Vladimir Jigulineb8b8132019-03-19 15:34:02 +0400204 @pytest.mark.k8s_system
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400205 def test_k8s_genie_flannel(self, show_step, config,
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300206 salt_actions, k8s_deployed):
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400207 """Test genie-cni+flannel cni setup
208
209 Scenario:
210 1. Setup Kubernetes cluster with genie cni and flannel
211 2. Check that flannel pods created in kube-system namespace
212 3. Create sample deployment with flannel cni annotation
213 4. Check that the deployment have 1 ip addresses from cni provider
214 5. Create sample deployment with calico cni annotation
215 6. Check that the deployment have 1 ip addresses from cni provider
216 7. Create sample deployment with multi-cni annotation
217 8. Check that the deployment have 2 ip addresses from different
218 cni providers
219 9. Create sample deployment without cni annotation
220 10. Check that the deployment have 1 ip address
221 11. Check pods availability
222 12. Run conformance
223 13. Check pods availability
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400224 14. Delete pods
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400225 """
226 show_step(1)
227
228 # Find out calico and flannel networks
Victor Ryzhenkin0c373822018-10-30 17:55:50 +0400229 tgt_k8s_control = "I@kubernetes:master"
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400230
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300231 flannel_pillar = salt_actions.get_pillar(
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400232 tgt=tgt_k8s_control,
233 pillar="kubernetes:master:network:flannel:private_ip_range")[0]
234 flannel_network = netaddr.IPNetwork(flannel_pillar.values()[0])
235 LOG.info("Flannel network: {}".format(flannel_network))
236
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300237 calico_network_pillar = salt_actions.get_pillar(
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400238 tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300239 calico_netmask_pillar = salt_actions.get_pillar(
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400240 tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
241 calico_network = netaddr.IPNetwork(
242 "{0}/{1}".format(calico_network_pillar.values()[0],
243 calico_netmask_pillar.values()[0]))
244 LOG.info("Calico network: {}".format(calico_network))
245
246 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400247 assert k8s_deployed.api.pods.list(
248 namespace="kube-system", name_prefix="kube-flannel-") > 0
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400249
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400250 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400251 flannel_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400252 body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400253 flannel_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400254
255 show_step(4)
256 flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
257 assert len(flannel_ips) == 1
258 assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
259
260 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400261 calico_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400262 body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400263 calico_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400264
265 show_step(6)
266 calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
267 assert len(calico_ips) == 1
268 assert netaddr.IPAddress(calico_ips[0]) in calico_network
269
270 show_step(7)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400271 multicni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400272 body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400273 multicni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400274
275 show_step(8)
276 multicni_ips = \
277 k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
278 assert len(multicni_ips) == 2
279 for net in [calico_network, flannel_network]:
280 assert netaddr.IPAddress(multicni_ips[0]) in net or \
281 netaddr.IPAddress(multicni_ips[1]) in net
282
283 show_step(9)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400284 nocni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400285 body=self.__read_testdata_yaml('pod-sample.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400286 nocni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400287
288 show_step(10)
289 nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
290 assert len(nocni_ips) == 1
291 assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
292 netaddr.IPAddress(nocni_ips[0]) in flannel_network)
293
294 show_step(11)
295
296 def check_pod_availability(ip):
297 assert "Hello Kubernetes!" in k8s_deployed.curl(
298 "http://{}:8080".format(ip))
299
300 def check_pods_availability():
301 check_pod_availability(flannel_ips[0])
302 check_pod_availability(calico_ips[0])
303 check_pod_availability(multicni_ips[0])
304 check_pod_availability(multicni_ips[1])
305 check_pod_availability(nocni_ips[0])
306
307 check_pods_availability()
308
309 show_step(12)
Victor Ryzhenkin57c43202018-12-28 01:48:39 +0400310 k8s_deployed.start_conformance_inside_pod()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400311
312 show_step(13)
313 check_pods_availability()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400314
315 show_step(14)
316 flannel_pod.delete()
317 calico_pod.delete()
318 multicni_pod.delete()
319 nocni_pod.delete()
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400320
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.k8s_dashboard
    @pytest.mark.k8s_system
    def test_k8s_dashboard(self, show_step, config, k8s_deployed):
        """Test dashboard setup

        Scenario:
            1. Setup Kubernetes cluster
            2. Try to curl login status api
            3. Create a test-admin-user account
            4. Try to login in dashboard using test-admin-user account
            5. Get and check list of namespaces using dashboard api
        """
        show_step(1)

        show_step(2)
        system_ns = 'kube-system'
        dashboard_service = \
            k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
        dashboard_url = 'https://{}'.format(dashboard_service.get_ip())

        def dashboard_curl(url, data=None, headers=None):
            """ Using curl command on controller node. Alternatives:
                - connect_{get,post}_namespaced_service_proxy_with_path -
                    k8s lib does not provide way to pass headers or POST data
                - raw rest k8s api - need to auth somehow
                - new load-balancer svc for dashboard + requests python lib -
                    requires working metallb or other load-balancer
            """
            # --insecure: dashboard serves a self-signed certificate
            args = ['--insecure']
            for name in headers or {}:
                args.append('--header')
                args.append("{0}: {1}".format(name, headers[name]))
            if data is not None:
                args.append('--data')
                args.append(data)
            # curl output presumably arrives as a chunk iterable — joined
            # into a single response string
            return ''.join(k8s_deployed.curl(dashboard_url + url, *args))

        assert 'tokenPresent' in \
            json.loads(dashboard_curl('/api/v1/login/status'))

        show_step(3)
        account = k8s_deployed.api.serviceaccounts.create(
            namespace=system_ns,
            body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
        account.wait_secret_generation()

        # Grant the new service account cluster-admin-like rights so the
        # dashboard can list namespaces on its behalf
        k8s_deployed.api.clusterrolebindings.create(
            body=self.__read_testdata_yaml(
                'test-admin-user-cluster-role-bind.yaml'))

        # Token lives base64-encoded in the account's generated secret
        account_secret = account.read().secrets[0]
        account_token = k8s_deployed.api.secrets.get(
            namespace=system_ns, name=account_secret.name).read().data['token']

        show_step(4)
        # Login flow: fetch CSRF token, POST the (decoded) account token,
        # receive a JWE session token used for subsequent API calls.
        # NOTE: .decode('base64') is Python 2 only.
        csrf_token = \
            json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
        login_headers = {'X-CSRF-TOKEN': csrf_token,
                         'Content-Type': 'application/json'}
        jwe_token = json.loads(dashboard_curl(
            '/api/v1/login', headers=login_headers,
            data=json.dumps({'token': account_token.decode('base64')})
        ))['jweToken']
        headers = {'jweToken': jwe_token}

        show_step(5)
        dashboard_namespaces = json.loads(
            dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']

        # Every namespace reported by the dashboard must exist in the
        # cluster according to the k8s API
        namespaces_names_list = \
            [ns.name for ns in k8s_deployed.api.namespaces.list()]
        for namespace in dashboard_namespaces:
            assert namespace['objectMeta']['name'] in namespaces_names_list
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400396
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400397 @pytest.mark.grab_versions
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400398 @pytest.mark.fail_snapshot
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200399 @pytest.mark.k8s_ingress_nginx
Vladimir Jigulineb8b8132019-03-19 15:34:02 +0400400 @pytest.mark.k8s_system
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300401 def test_k8s_ingress_nginx(self, show_step, config, k8s_deployed):
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400402 """Test ingress-nginx configured and working with metallb
403
404 Scenario:
405 1. Setup Kubernetes cluster with metallb
406 2. Create 2 example deployments and expose them
407 3. Create ingress controller with 2 backends to each deployment
408 service respectively
409 4. Wait ingress for deploy
410 5. Try to reach default endpoint
411 6. Try to reach test1 and test2 deployment services endpoints
412 """
413 show_step(1)
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200414 if not k8s_deployed.is_metallb_enabled:
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400415 pytest.skip("Test requires metallb addon enabled")
Dennis Dmitriev66650fc2018-11-02 11:04:37 +0200416 if not k8s_deployed.is_ingress_nginx_enabled:
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400417 pytest.skip("Test requires ingress-nginx addon enabled")
418
419 show_step(2)
420 image = 'nginxdemos/hello:plain-text'
421 port = 80
422 dep1 = k8s_deployed.run_sample_deployment(
423 'dep-ingress-1', image=image, port=port)
424 dep2 = k8s_deployed.run_sample_deployment(
425 'dep-ingress-2', image=image, port=port)
426 svc1 = dep1.wait_ready().expose()
427 svc2 = dep2.wait_ready().expose()
428
429 show_step(3)
430 body = {
431 'apiVersion': 'extensions/v1beta1',
432 'kind': 'Ingress',
433 'metadata': {'name': 'ingress-test'},
434 'spec': {
435 'rules': [{'http': {
436 'paths': [{
437 'backend': {
438 'serviceName': svc1.name,
439 'servicePort': port},
440 'path': '/test1'}, {
441 'backend': {
442 'serviceName': svc2.name,
443 'servicePort': port},
444 'path': '/test2'
445 }]
446 }}]
447 }
448 }
449 ingress = k8s_deployed.api.ingresses.create(body=body)
450
451 show_step(4)
452 ingress.wait_ready()
453
454 show_step(5)
455 ingress_address = "https://{}".format(
456 ingress.read().status.load_balancer.ingress[0].ip)
457
458 assert requests.get(ingress_address, verify=False).status_code == 404
459
460 show_step(6)
461 req1 = requests.get(ingress_address + "/test1", verify=False)
462 assert req1.status_code == 200
463 assert 'dep-ingress-1' in req1.text
464
465 req2 = requests.get(ingress_address + "/test2", verify=False)
466 assert req2.status_code == 200
467 assert 'dep-ingress-2' in req2.text
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400468
469 @pytest.mark.grab_versions
470 @pytest.mark.fail_snapshot
471 def test_k8s_cicd_upgrade(self, show_step, config,
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300472 salt_actions, k8s_deployed):
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400473 """Test k8s upgrade cicd pipeline
474
475 Scenario:
476 1. Setup Kubernetes+CICD cluster
477 2. Start deploy-k8s-upgrade job in jenkins
478 3. Wait for job to end
479 """
480 show_step(1)
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300481 jenkins_info = salt_actions.get_pillar(
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400482 tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]
483
Dennis Dmitrievfa1774a2019-05-28 15:27:44 +0300484 salt_api = salt_actions.get_pillar(
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400485 tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]
486
487 show_step(2)
488 jenkins = JenkinsClient(
489 host='http://{host}:{port}'.format(**jenkins_info),
490 username=jenkins_info['username'],
491 password=jenkins_info['password'])
492
493 params = jenkins.make_defults_params('deploy-k8s-upgrade')
494 params['SALT_MASTER_URL'] = salt_api
495 params['SALT_MASTER_CREDENTIALS'] = 'salt'
496 params['CONFORMANCE_RUN_AFTER'] = True
497 params['CONFORMANCE_RUN_BEFORE'] = True
498 build = jenkins.run_build('deploy-k8s-upgrade', params)
499
500 show_step(3)
501 jenkins.wait_end_of_build(
502 name=build[0], build_id=build[1], timeout=3600 * 4)
503 result = jenkins.build_info(
504 name=build[0], build_id=build[1])['result']
505 assert result == 'SUCCESS', "k8s upgrade job has been failed"