blob: 066a476e5112f2cd54b49a2706ff2cf35f9b8bcb [file] [log] [blame]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +04001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
15import pytest
Vladimir Jigulin34dfa942018-07-23 21:05:48 +040016import netaddr
17import os
Vladimir Jigulin57ecae92018-09-10 22:51:15 +040018import json
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +040019import requests
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040020
21from tcp_tests import logger
22from tcp_tests import settings
23
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040024from tcp_tests.managers.k8s import read_yaml_file
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040025from tcp_tests.managers.jenkins.client import JenkinsClient
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040026
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040027LOG = logger.logger
28
29
class TestMCPK8sActions(object):
    """Test class for different k8s actions"""

    def __read_testdata_yaml(self, name):
        """Load a YAML fixture that lives next to this test module.

        :param name: file name of the fixture inside ``testdata/k8s``
        :return: parsed YAML content, as returned by ``read_yaml_file``
        """
        # 'directory' instead of 'dir' -- the original name shadowed the
        # builtin dir().
        directory = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
        return read_yaml_file(directory, name)
36
Tatyana Leontovichc411ec32017-10-09 14:48:00 +030037 @pytest.mark.grab_versions
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040038 @pytest.mark.fail_snapshot
Tatyana Leontovich071ce6a2017-10-24 18:08:10 +030039 @pytest.mark.cz8116
Dennis Dmitriev0f624a82018-06-11 12:57:13 +030040 @pytest.mark.k8s_calico
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040041 def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
42 """Test externaldns integration with coredns
43
44 Scenario:
45 1. Install k8s with externaldns addon enabled(including etcd, coredns)
46 2. Start simple service
47 3. Expose deployment
48 4. Annotate service with domain name
49 5. Try to get service using nslookup
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040050 6. Delete service and deployment
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040051 """
52
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040053 show_step(1)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040054 if not (config.k8s_deploy.kubernetes_externaldns_enabled and
55 config.k8s_deploy.kubernetes_coredns_enabled):
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040056 pytest.skip("Test requires externaldns and coredns addons enabled")
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040057
58 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040059 deployment = k8s_deployed.run_sample_deployment('test-dep')
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040060
61 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040062 svc = deployment.expose()
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040063
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040064 show_step(4)
Vladimir Jigulin7eb41b02018-10-24 17:03:51 +040065 hostname = "test.{0}.".format(settings.DOMAIN_NAME)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040066 svc.patch({
67 "metadata": {
68 "annotations": {
69 "external-dns.alpha.kubernetes.io/hostname": hostname
70 }
71 }
72 })
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +040073
74 show_step(5)
Vladimir Jigulin90689152018-09-26 15:38:19 +040075 dns_svc = k8s_deployed.api.services.get(
76 name='coredns', namespace='kube-system')
77 k8s_deployed.nslookup(hostname, dns_svc.get_ip())
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +040078
79 show_step(6)
80 deployment.delete()
Victor Ryzhenkin87a31422018-03-16 22:25:27 +040081
    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    k8s_cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance
        """

        show_step(1)
        # Runs the whole CNCF conformance suite on the deployed cluster;
        # the artifacts listed in the cncf_publisher marker are collected
        # by the k8s_cncf_log_helper fixture after the run.
        k8s_deployed.start_k8s_cncf_verification()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040097
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +040098 @pytest.mark.grab_versions
Vladimir Jigulin62bcf462018-05-28 18:17:01 +040099 @pytest.mark.fail_snapshot
100 def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
101 k8s_chain_update_log_helper):
102 """Test for chain-upgrading k8s hypercube pool and checking it
103
104 Scenario:
105 1. Prepare salt on hosts
106 2. Setup controller nodes
107 3. Setup compute nodes
108 4. Setup Kubernetes cluster
109 5. Run and expose sample test service
110 6. Run conformance to check consistency
111 7. For every version in update chain:
112 Update cluster to new version, check test sample service
113 availability, run conformance
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400114 8. Delete service and deployment
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400115 """
116
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400117 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400118 sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400119 sample.expose()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400120 sample.wait_ready()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400121
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400122 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400123
124 show_step(6)
125 k8s_deployed.run_conformance(log_out="k8s_conformance.log")
126
127 show_step(7)
128 chain_versions = config.k8s.k8s_update_chain.split(" ")
129 for version in chain_versions:
130 LOG.info("Chain update to '{}' version".format(version))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400131 k8s_deployed.update_k8s_version(version)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400132
133 LOG.info("Checking test service availability")
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400134 assert sample.is_service_available()
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400135
136 LOG.info("Running conformance on {} version".format(version))
137 log_name = "k8s_conformance_{}.log".format(version)
138 k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400139
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400140 assert sample.is_service_available()
141
142 show_step(8)
143 sample.delete()
144
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400145 @pytest.mark.grab_versions
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400146 @pytest.mark.fail_snapshot
147 def test_k8s_metallb(self, show_step, config, k8s_deployed):
148 """Enable metallb in cluster and do basic tests
149
150 Scenario:
151 1. Setup Kubernetes cluster with enabled metallb
152 2. Check that metallb pods created in metallb-system namespace
153 3. Run 5 sample deployments
154 4. Expose deployments with type=LoadBalancer
155 5. Check services availability from outside of cluster
156 6. Run conformance
157 7. Check services availability from outside of cluster
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400158 8. Delete deployments
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400159 """
160 show_step(1)
161 if not config.k8s_deploy.kubernetes_metallb_enabled:
162 pytest.skip("Test requires metallb addon enabled")
163
164 show_step(2)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400165 ns = "metallb-system"
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400166 assert \
167 len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
168 assert \
169 len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400170
171 show_step(3)
172 samples = []
173 for i in range(5):
174 name = 'test-dep-metallb-{}'.format(i)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400175 samples.append(k8s_deployed.run_sample_deployment(name))
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400176
177 show_step(4)
178 for sample in samples:
179 sample.expose('LoadBalancer')
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400180 sample.wait_ready()
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400181
182 show_step(5)
183 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400184 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400185 assert sample.is_service_available(external=True)
186
187 show_step(6)
188 k8s_deployed.run_conformance()
189
190 show_step(7)
191 for sample in samples:
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400192 assert sample.is_service_available(external=False)
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400193 assert sample.is_service_available(external=True)
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400194
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400195 show_step(8)
196 for sample in samples:
197 sample.delete()
198
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400199 @pytest.mark.grab_versions
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400200 @pytest.mark.fail_snapshot
Dennis Dmitrievee5ef232018-08-31 13:53:18 +0300201 @pytest.mark.k8s_genie
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400202 def test_k8s_genie_flannel(self, show_step, config,
203 salt_deployed, k8s_deployed):
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400204 """Test genie-cni+flannel cni setup
205
206 Scenario:
207 1. Setup Kubernetes cluster with genie cni and flannel
208 2. Check that flannel pods created in kube-system namespace
209 3. Create sample deployment with flannel cni annotation
210 4. Check that the deployment have 1 ip addresses from cni provider
211 5. Create sample deployment with calico cni annotation
212 6. Check that the deployment have 1 ip addresses from cni provider
213 7. Create sample deployment with multi-cni annotation
214 8. Check that the deployment have 2 ip addresses from different
215 cni providers
216 9. Create sample deployment without cni annotation
217 10. Check that the deployment have 1 ip address
218 11. Check pods availability
219 12. Run conformance
220 13. Check pods availability
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400221 14. Delete pods
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400222 """
223 show_step(1)
224
225 # Find out calico and flannel networks
Victor Ryzhenkin0c373822018-10-30 17:55:50 +0400226 tgt_k8s_control = "I@kubernetes:master"
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400227
228 flannel_pillar = salt_deployed.get_pillar(
229 tgt=tgt_k8s_control,
230 pillar="kubernetes:master:network:flannel:private_ip_range")[0]
231 flannel_network = netaddr.IPNetwork(flannel_pillar.values()[0])
232 LOG.info("Flannel network: {}".format(flannel_network))
233
234 calico_network_pillar = salt_deployed.get_pillar(
235 tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
236 calico_netmask_pillar = salt_deployed.get_pillar(
237 tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
238 calico_network = netaddr.IPNetwork(
239 "{0}/{1}".format(calico_network_pillar.values()[0],
240 calico_netmask_pillar.values()[0]))
241 LOG.info("Calico network: {}".format(calico_network))
242
243 show_step(2)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400244 assert k8s_deployed.api.pods.list(
245 namespace="kube-system", name_prefix="kube-flannel-") > 0
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400246
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400247 show_step(3)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400248 flannel_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400249 body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400250 flannel_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400251
252 show_step(4)
253 flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
254 assert len(flannel_ips) == 1
255 assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
256
257 show_step(5)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400258 calico_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400259 body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400260 calico_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400261
262 show_step(6)
263 calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
264 assert len(calico_ips) == 1
265 assert netaddr.IPAddress(calico_ips[0]) in calico_network
266
267 show_step(7)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400268 multicni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400269 body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400270 multicni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400271
272 show_step(8)
273 multicni_ips = \
274 k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
275 assert len(multicni_ips) == 2
276 for net in [calico_network, flannel_network]:
277 assert netaddr.IPAddress(multicni_ips[0]) in net or \
278 netaddr.IPAddress(multicni_ips[1]) in net
279
280 show_step(9)
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400281 nocni_pod = k8s_deployed.api.pods.create(
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400282 body=self.__read_testdata_yaml('pod-sample.yaml'))
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400283 nocni_pod.wait_running()
Vladimir Jigulin34dfa942018-07-23 21:05:48 +0400284
285 show_step(10)
286 nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
287 assert len(nocni_ips) == 1
288 assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
289 netaddr.IPAddress(nocni_ips[0]) in flannel_network)
290
291 show_step(11)
292
293 def check_pod_availability(ip):
294 assert "Hello Kubernetes!" in k8s_deployed.curl(
295 "http://{}:8080".format(ip))
296
297 def check_pods_availability():
298 check_pod_availability(flannel_ips[0])
299 check_pod_availability(calico_ips[0])
300 check_pod_availability(multicni_ips[0])
301 check_pod_availability(multicni_ips[1])
302 check_pod_availability(nocni_ips[0])
303
304 check_pods_availability()
305
306 show_step(12)
307 k8s_deployed.run_conformance()
308
309 show_step(13)
310 check_pods_availability()
Vladimir Jigulin4ad52a82018-08-12 05:51:30 +0400311
312 show_step(14)
313 flannel_pod.delete()
314 calico_pod.delete()
315 multicni_pod.delete()
316 nocni_pod.delete()
Vladimir Jigulin57ecae92018-09-10 22:51:15 +0400317
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_dashboard(self, show_step, config,
                           salt_deployed, k8s_deployed):
        """Test dashboard setup

        Scenario:
            1. Setup Kubernetes cluster
            2. Try to curl login status api
            3. Create a test-admin-user account
            4. Try to login in dashboard using test-admin-user account
            5. Get and check list of namespaces using dashboard api
        """
        show_step(1)

        show_step(2)
        # Reach the dashboard through its cluster service IP over https.
        system_ns = 'kube-system'
        dashboard_service = \
            k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
        dashboard_url = 'https://{}'.format(dashboard_service.get_ip())

        def dashboard_curl(url, data=None, headers=None):
            """ Using curl command on controller node. Alternatives:
                - connect_{get,post}_namespaced_service_proxy_with_path -
                k8s lib does not provide way to pass headers or POST data
                - raw rest k8s api - need to auth somehow
                - new load-balancer svc for dashboard + requests python lib -
                requires working metallb or other load-balancer
            """
            # --insecure: the dashboard uses a self-signed certificate.
            args = ['--insecure']
            for name in headers or {}:
                args.append('--header')
                args.append("{0}: {1}".format(name, headers[name]))
            if data is not None:
                # Passing --data turns the curl request into a POST.
                args.append('--data')
                args.append(data)
            return ''.join(k8s_deployed.curl(dashboard_url + url, *args))

        # Unauthenticated status endpoint must at least report token support.
        assert 'tokenPresent' in \
            json.loads(dashboard_curl('/api/v1/login/status'))

        show_step(3)
        # Create a service account and bind it to cluster-admin, then pull
        # its auto-generated bearer token out of the backing secret.
        account = k8s_deployed.api.serviceaccounts.create(
            namespace=system_ns,
            body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
        account.wait_secret_generation()

        k8s_deployed.api.clusterrolebindings.create(
            body=self.__read_testdata_yaml(
                'test-admin-user-cluster-role-bind.yaml'))

        account_secret = account.read().secrets[0]
        account_token = k8s_deployed.api.secrets.get(
            namespace=system_ns, name=account_secret.name).read().data['token']

        show_step(4)
        # Login flow: fetch a CSRF token, then exchange the (base64-decoded)
        # service-account token for a JWE session token.
        # NOTE(review): str.decode('base64') below is python2-only; this
        # module targets py2 -- would need base64.b64decode on py3.
        csrf_token = \
            json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
        login_headers = {'X-CSRF-TOKEN': csrf_token,
                         'Content-Type': 'application/json'}
        jwe_token = json.loads(dashboard_curl(
            '/api/v1/login', headers=login_headers,
            data=json.dumps({'token': account_token.decode('base64')})
        ))['jweToken']
        headers = {'jweToken': jwe_token}

        show_step(5)
        # Every namespace the dashboard reports must actually exist in k8s.
        dashboard_namespaces = json.loads(
            dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']

        namespaces_names_list = \
            [ns.name for ns in k8s_deployed.api.namespaces.list()]
        for namespace in dashboard_namespaces:
            assert namespace['objectMeta']['name'] in namespaces_names_list
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400392
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400393 @pytest.mark.grab_versions
Vladimir Jigulin5775bbb2018-10-03 10:34:54 +0400394 @pytest.mark.fail_snapshot
395 def test_k8s_ingress_nginx(self, show_step, config,
396 salt_deployed, k8s_deployed):
397 """Test ingress-nginx configured and working with metallb
398
399 Scenario:
400 1. Setup Kubernetes cluster with metallb
401 2. Create 2 example deployments and expose them
402 3. Create ingress controller with 2 backends to each deployment
403 service respectively
404 4. Wait ingress for deploy
405 5. Try to reach default endpoint
406 6. Try to reach test1 and test2 deployment services endpoints
407 """
408 show_step(1)
409 if not config.k8s_deploy.kubernetes_metallb_enabled:
410 pytest.skip("Test requires metallb addon enabled")
411 if not config.k8s_deploy.kubernetes_ingressnginx_enabled:
412 pytest.skip("Test requires ingress-nginx addon enabled")
413
414 show_step(2)
415 image = 'nginxdemos/hello:plain-text'
416 port = 80
417 dep1 = k8s_deployed.run_sample_deployment(
418 'dep-ingress-1', image=image, port=port)
419 dep2 = k8s_deployed.run_sample_deployment(
420 'dep-ingress-2', image=image, port=port)
421 svc1 = dep1.wait_ready().expose()
422 svc2 = dep2.wait_ready().expose()
423
424 show_step(3)
425 body = {
426 'apiVersion': 'extensions/v1beta1',
427 'kind': 'Ingress',
428 'metadata': {'name': 'ingress-test'},
429 'spec': {
430 'rules': [{'http': {
431 'paths': [{
432 'backend': {
433 'serviceName': svc1.name,
434 'servicePort': port},
435 'path': '/test1'}, {
436 'backend': {
437 'serviceName': svc2.name,
438 'servicePort': port},
439 'path': '/test2'
440 }]
441 }}]
442 }
443 }
444 ingress = k8s_deployed.api.ingresses.create(body=body)
445
446 show_step(4)
447 ingress.wait_ready()
448
449 show_step(5)
450 ingress_address = "https://{}".format(
451 ingress.read().status.load_balancer.ingress[0].ip)
452
453 assert requests.get(ingress_address, verify=False).status_code == 404
454
455 show_step(6)
456 req1 = requests.get(ingress_address + "/test1", verify=False)
457 assert req1.status_code == 200
458 assert 'dep-ingress-1' in req1.text
459
460 req2 = requests.get(ingress_address + "/test2", verify=False)
461 assert req2.status_code == 200
462 assert 'dep-ingress-2' in req2.text
Vladimir Jigulin2154e4b2018-11-14 12:14:05 +0400463
464 @pytest.mark.grab_versions
465 @pytest.mark.fail_snapshot
466 def test_k8s_cicd_upgrade(self, show_step, config,
467 salt_deployed, k8s_deployed):
468 """Test k8s upgrade cicd pipeline
469
470 Scenario:
471 1. Setup Kubernetes+CICD cluster
472 2. Start deploy-k8s-upgrade job in jenkins
473 3. Wait for job to end
474 """
475 show_step(1)
476 jenkins_info = salt_deployed.get_pillar(
477 tgt='cid*1*', pillar="jenkins:client:master")[0].values()[0]
478
479 salt_api = salt_deployed.get_pillar(
480 tgt='cid*1*', pillar="_param:jenkins_salt_api_url")[0].values()[0]
481
482 show_step(2)
483 jenkins = JenkinsClient(
484 host='http://{host}:{port}'.format(**jenkins_info),
485 username=jenkins_info['username'],
486 password=jenkins_info['password'])
487
488 params = jenkins.make_defults_params('deploy-k8s-upgrade')
489 params['SALT_MASTER_URL'] = salt_api
490 params['SALT_MASTER_CREDENTIALS'] = 'salt'
491 params['CONFORMANCE_RUN_AFTER'] = True
492 params['CONFORMANCE_RUN_BEFORE'] = True
493 build = jenkins.run_build('deploy-k8s-upgrade', params)
494
495 show_step(3)
496 jenkins.wait_end_of_build(
497 name=build[0], build_id=build[1], timeout=3600 * 4)
498 result = jenkins.build_info(
499 name=build[0], build_id=build[1])['result']
500 assert result == 'SUCCESS', "k8s upgrade job has been failed"