# Copyright 2017 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest
import netaddr
import os
import json

from tcp_tests import logger
from tcp_tests import settings

from tcp_tests.managers.k8s import read_yaml_file

LOG = logger.logger


class TestMCPK8sActions(object):
    """Test class for different k8s actions"""

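    # Helper for loading YAML manifests from the testdata/k8s directory that
    # sits next to this test module.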
    def __read_testdata_yaml(self, name):
        dir_path = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
        return read_yaml_file(dir_path, name)

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.cz8116
    @pytest.mark.k8s_calico
    def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
        """Test externaldns integration with coredns

        Scenario:
            1. Install k8s with the externaldns addon enabled
               (including etcd, coredns)
            2. Start simple service
            3. Expose deployment
            4. Annotate service with domain name
            5. Try to get service using nslookup
            6. Delete service and deployment
        """

        show_step(1)
        if not (config.k8s_deploy.kubernetes_externaldns_enabled and
                config.k8s_deploy.kubernetes_coredns_enabled):
            pytest.skip("Test requires externaldns and coredns addons enabled")

        show_step(2)
        deployment = k8s_deployed.run_sample_deployment('test-dep')

        show_step(3)
        svc = deployment.expose()

        show_step(4)
        hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
        svc.patch({
            "metadata": {
                "annotations": {
                    "external-dns.alpha.kubernetes.io/hostname": hostname
                }
            }
        })

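        # The external-dns addon is expected to notice the hostname
        # annotation set above and publish a matching record through its
        # etcd backend so that coredns can serve it; the nslookup call below
        # checks that the name resolves to the service IP.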
        show_step(5)
        k8s_deployed.nslookup(hostname, svc.get_ip())

        show_step(6)
        deployment.delete()

    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    k8s_cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance
        """

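        # The cncf_publisher marker lists the artifacts (e2e.log,
        # junit_01.xml, version.txt, cncf_results.tar.gz) needed for a CNCF
        # conformance pull request; they are presumably gathered by the
        # k8s_cncf_log_helper fixture after the verification run.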
        show_step(1)
        k8s_deployed.start_k8s_cncf_verification()

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
                              k8s_chain_update_log_helper):
        """Test for chain-upgrading the k8s hyperkube pool and checking it

        Scenario:
            1. Prepare salt on hosts
            2. Setup controller nodes
            3. Setup compute nodes
            4. Setup Kubernetes cluster
            5. Run and expose sample test service
            6. Run conformance to check consistency
            7. For every version in update chain:
               Update cluster to new version, check test sample service
               availability, run conformance
            8. Delete service and deployment
        """

        show_step(5)
        sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
        sample.expose()
        sample.wait_ready()

        assert sample.is_service_available()

        show_step(6)
        k8s_deployed.run_conformance(log_out="k8s_conformance.log")

        show_step(7)
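        # k8s.k8s_update_chain is treated as a space-separated list of target
        # versions; each one is applied in order, and the sample service and
        # the conformance suite are re-checked after every upgrade.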
        chain_versions = config.k8s.k8s_update_chain.split(" ")
        for version in chain_versions:
            LOG.info("Chain update to '{}' version".format(version))
            k8s_deployed.update_k8s_version(version)

            LOG.info("Checking test service availability")
            assert sample.is_service_available()

            LOG.info("Running conformance on {} version".format(version))
            log_name = "k8s_conformance_{}.log".format(version)
            k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)

        assert sample.is_service_available()

        show_step(8)
        sample.delete()

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_metallb(self, show_step, config, k8s_deployed):
        """Enable metallb in cluster and do basic tests

        Scenario:
            1. Setup Kubernetes cluster with enabled metallb
            2. Check that metallb pods are created in metallb-system namespace
            3. Run 5 sample deployments
            4. Expose deployments with type=LoadBalancer
            5. Check services availability from outside of cluster
            6. Run conformance
            7. Check services availability from outside of cluster
            8. Delete deployments
        """
        show_step(1)
        if not config.k8s_deploy.kubernetes_metallb_enabled:
            pytest.skip("Test requires metallb addon enabled")

        show_step(2)
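        # MetalLB normally runs a "controller" deployment and a "speaker"
        # daemonset in the metallb-system namespace, so both pod name
        # prefixes are expected to match at least one pod.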
        ns = "metallb-system"
        assert \
            len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
        assert \
            len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0

        show_step(3)
        samples = []
        for i in range(5):
            name = 'test-dep-metallb-{}'.format(i)
            samples.append(k8s_deployed.run_sample_deployment(name))

        show_step(4)
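        # Exposing with type=LoadBalancer should make MetalLB assign an
        # external IP from its configured address pool, which the
        # external=True availability checks below rely on.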
        for sample in samples:
            sample.expose('LoadBalancer')
            sample.wait_ready()

        show_step(5)
        for sample in samples:
            assert sample.is_service_available(external=False)
            assert sample.is_service_available(external=True)

        show_step(6)
        k8s_deployed.run_conformance()

        show_step(7)
        for sample in samples:
            assert sample.is_service_available(external=False)
            assert sample.is_service_available(external=True)

        show_step(8)
        for sample in samples:
            sample.delete()

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.k8s_genie
    def test_k8s_genie_flannel(self, show_step, config,
                               salt_deployed, k8s_deployed):
        """Test genie-cni+flannel cni setup

        Scenario:
            1. Setup Kubernetes cluster with genie cni and flannel
            2. Check that flannel pods are created in kube-system namespace
            3. Create sample pod with flannel cni annotation
            4. Check that the pod has 1 IP address from the flannel network
            5. Create sample pod with calico cni annotation
            6. Check that the pod has 1 IP address from the calico network
            7. Create sample pod with multi-cni annotation
            8. Check that the pod has 2 IP addresses, one from each cni
               provider
            9. Create sample pod without cni annotation
            10. Check that the pod has 1 IP address
            11. Check pods availability
            12. Run conformance
            13. Check pods availability
            14. Delete pods
        """
        show_step(1)

        # Find out calico and flannel networks
        tgt_k8s_control = "I@kubernetes:control:enabled:True"

        flannel_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control,
            pillar="kubernetes:master:network:flannel:private_ip_range")[0]
        flannel_network = netaddr.IPNetwork(flannel_pillar.values()[0])
        LOG.info("Flannel network: {}".format(flannel_network))

        calico_network_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
        calico_netmask_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
        calico_network = netaddr.IPNetwork(
            "{0}/{1}".format(calico_network_pillar.values()[0],
                             calico_netmask_pillar.values()[0]))
        LOG.info("Calico network: {}".format(calico_network))

        show_step(2)
        assert len(k8s_deployed.api.pods.list(
            namespace="kube-system", name_prefix="kube-flannel-")) > 0

        show_step(3)
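        # The pod-sample-*.yaml manifests are assumed to carry the CNI Genie
        # annotation (cni: flannel / cni: calico / a multi-CNI value) that
        # tells genie which plugin(s) should wire up the pod network.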
        flannel_pod = k8s_deployed.api.pods.create(
            body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
        flannel_pod.wait_running()

        show_step(4)
        flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
        assert len(flannel_ips) == 1
        assert netaddr.IPAddress(flannel_ips[0]) in flannel_network

        show_step(5)
        calico_pod = k8s_deployed.api.pods.create(
            body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
        calico_pod.wait_running()

        show_step(6)
        calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
        assert len(calico_ips) == 1
        assert netaddr.IPAddress(calico_ips[0]) in calico_network

        show_step(7)
        multicni_pod = k8s_deployed.api.pods.create(
            body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
        multicni_pod.wait_running()

        show_step(8)
        multicni_ips = \
            k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
        assert len(multicni_ips) == 2
        for net in [calico_network, flannel_network]:
            assert netaddr.IPAddress(multicni_ips[0]) in net or \
                netaddr.IPAddress(multicni_ips[1]) in net

        show_step(9)
        nocni_pod = k8s_deployed.api.pods.create(
            body=self.__read_testdata_yaml('pod-sample.yaml'))
        nocni_pod.wait_running()

        show_step(10)
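        # Without a cni annotation genie picks a plugin on its own, so the
        # single address may come from either the calico or the flannel
        # network.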
        nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
        assert len(nocni_ips) == 1
        assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
                netaddr.IPAddress(nocni_ips[0]) in flannel_network)

        show_step(11)

        def check_pod_availability(ip):
            assert "Hello Kubernetes!" in k8s_deployed.curl(
                "http://{}:8080".format(ip))

        def check_pods_availability():
            check_pod_availability(flannel_ips[0])
            check_pod_availability(calico_ips[0])
            check_pod_availability(multicni_ips[0])
            check_pod_availability(multicni_ips[1])
            check_pod_availability(nocni_ips[0])

        check_pods_availability()

        show_step(12)
        k8s_deployed.run_conformance()

        show_step(13)
        check_pods_availability()

        show_step(14)
        flannel_pod.delete()
        calico_pod.delete()
        multicni_pod.delete()
        nocni_pod.delete()

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_dashboard(self, show_step, config,
                           salt_deployed, k8s_deployed):
        """Test dashboard setup

        Scenario:
            1. Setup Kubernetes cluster
            2. Try to curl the login status api
            3. Create a test-admin-user account
            4. Log in to the dashboard using the test-admin-user account
            5. Get and check the list of namespaces using the dashboard api
        """
        show_step(1)

        show_step(2)
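        # The dashboard is reached directly on its service IP over HTTPS from
        # a controller node; --insecure is used in dashboard_curl below since
        # the dashboard certificate is presumably self-signed.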
        system_ns = 'kube-system'
        dashboard_service = \
            k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
        dashboard_url = 'https://{}'.format(dashboard_service.get_ip())

        def dashboard_curl(url, data=None, headers=None):
            """Call the dashboard API with curl on a controller node.

            Alternatives that were considered:
            - connect_{get,post}_namespaced_service_proxy_with_path -
              the k8s lib does not provide a way to pass headers or POST data
            - raw rest k8s api - need to auth somehow
            - new load-balancer svc for dashboard + requests python lib -
              requires working metallb or other load-balancer
            """
            args = ['--insecure']
            for name in headers or {}:
                args.append('--header')
                args.append("{0}: {1}".format(name, headers[name]))
            if data is not None:
                args.append('--data')
                args.append(data)
            return ''.join(k8s_deployed.curl(dashboard_url + url, *args))

        assert 'tokenPresent' in \
            json.loads(dashboard_curl('/api/v1/login/status'))

        show_step(3)
        account = k8s_deployed.api.serviceaccounts.create(
            namespace=system_ns,
            body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
        account.wait_secret_generation()

        k8s_deployed.api.clusterrolebindings.create(
            body=self.__read_testdata_yaml(
                'test-admin-user-cluster-role-bind.yaml'))

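        # The token stored in the account's auto-generated secret is
        # base64-encoded; it is decoded before being sent to the login
        # endpoint below.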
        account_secret = account.read().secrets[0]
        account_token = k8s_deployed.api.secrets.get(
            namespace=system_ns, name=account_secret.name).read().data['token']

        show_step(4)
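        # Dashboard login flow: fetch a CSRF token, POST the service account
        # bearer token to /api/v1/login, and use the returned JWE token in
        # the 'jweToken' header to authenticate further dashboard API calls.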
        csrf_token = \
            json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
        login_headers = {'X-CSRF-TOKEN': csrf_token,
                         'Content-Type': 'application/json'}
        jwe_token = json.loads(dashboard_curl(
            '/api/v1/login', headers=login_headers,
            data=json.dumps({'token': account_token.decode('base64')})
        ))['jweToken']
        headers = {'jweToken': jwe_token}

        show_step(5)
        dashboard_namespaces = json.loads(
            dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']

        namespaces_names_list = \
            [ns.name for ns in k8s_deployed.api.namespaces.list()]
        for namespace in dashboard_namespaces:
            assert namespace['objectMeta']['name'] in namespaces_names_list