blob: ff8150131cb733cd4240d99fea6640d4e9033e49 [file] [log] [blame]
Artem Panchenko0594cd72017-06-12 13:25:26 +03001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +040015import os
Artem Panchenko0594cd72017-06-12 13:25:26 +030016import time
Victor Ryzhenkin3ffa2b42017-10-05 16:38:44 +040017from uuid import uuid4
Artem Panchenko0594cd72017-06-12 13:25:26 +030018
Vladimir Jigulina6b018b2018-07-18 15:19:01 +040019import requests
Artem Panchenko0594cd72017-06-12 13:25:26 +030020import yaml
21
22from devops.helpers import helpers
Victor Ryzhenkin66d39372017-09-28 19:25:48 +040023from devops.error import DevopsCalledProcessError
Artem Panchenko0594cd72017-06-12 13:25:26 +030024
25from tcp_tests import logger
Victor Ryzhenkin66d39372017-09-28 19:25:48 +040026from tcp_tests.helpers import ext
27from tcp_tests.helpers.utils import retry
Artem Panchenko0594cd72017-06-12 13:25:26 +030028from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
29from tcp_tests.managers.k8s import cluster
Sergey Vasilenkofd1fd612017-09-20 13:09:51 +030030from k8sclient.client.rest import ApiException
Artem Panchenko0594cd72017-06-12 13:25:26 +030031
32LOG = logger.logger
33
34
35class K8SManager(ExecuteCommandsMixin):
36 """docstring for K8SManager"""
37
38 __config = None
39 __underlay = None
40
41 def __init__(self, config, underlay, salt):
42 self.__config = config
43 self.__underlay = underlay
44 self._salt = salt
45 self._api_client = None
46 super(K8SManager, self).__init__(
47 config=config, underlay=underlay)
48
49 def install(self, commands):
50 self.execute_commands(commands,
51 label='Install Kubernetes services')
52 self.__config.k8s.k8s_installed = True
53 self.__config.k8s.kube_host = self.get_proxy_api()
54
55 def get_proxy_api(self):
56 k8s_proxy_ip_pillars = self._salt.get_pillar(
vrovachev99228d32017-06-08 19:46:10 +040057 tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
Artem Panchenko0594cd72017-06-12 13:25:26 +030058 pillar='haproxy:proxy:listen:k8s_secure:binds:address')
vrovachev99228d32017-06-08 19:46:10 +040059 k8s_hosts = self._salt.get_pillar(
60 tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
61 pillar='kubernetes:pool:apiserver:host')
Artem Panchenko0594cd72017-06-12 13:25:26 +030062 k8s_proxy_ip = set([ip
63 for item in k8s_proxy_ip_pillars
Dina Belovae6fdffb2017-09-19 13:58:34 -070064 for node, ip in item.items() if ip])
vrovachev99228d32017-06-08 19:46:10 +040065 k8s_hosts = set([ip
Dina Belovae6fdffb2017-09-19 13:58:34 -070066 for item in k8s_hosts
67 for node, ip in item.items() if ip])
vrovachev99228d32017-06-08 19:46:10 +040068 assert len(k8s_hosts) == 1, (
69 "Found more than one Kubernetes API hosts in pillars:{0}, "
70 "expected one!").format(k8s_hosts)
71 k8s_host = k8s_hosts.pop()
72 assert k8s_host in k8s_proxy_ip, (
73 "Kubernetes API host:{0} not found in proxies:{} "
74 "on k8s master nodes. K8s proxies are expected on "
75 "nodes with K8s master").format(k8s_host, k8s_proxy_ip)
76 return k8s_host
Artem Panchenko0594cd72017-06-12 13:25:26 +030077
78 @property
79 def api(self):
80 if self._api_client is None:
81 self._api_client = cluster.K8sCluster(
82 user=self.__config.k8s_deploy.kubernetes_admin_user,
83 password=self.__config.k8s_deploy.kubernetes_admin_password,
84 host=self.__config.k8s.kube_host,
85 port=self.__config.k8s.kube_apiserver_port,
86 default_namespace='default')
87 return self._api_client
88
Victor Ryzhenkin66d39372017-09-28 19:25:48 +040089 @property
90 def ctl_host(self):
91 nodes = [node for node in self.__config.underlay.ssh if
92 ext.UNDERLAY_NODE_ROLES.k8s_controller in node['roles']]
93 return nodes[0]['node_name']
94
Artem Panchenko0594cd72017-06-12 13:25:26 +030095 def get_pod_phase(self, pod_name, namespace=None):
96 return self.api.pods.get(
97 name=pod_name, namespace=namespace).phase
98
99 def wait_pod_phase(self, pod_name, phase, namespace=None, timeout=60):
100 """Wait phase of pod_name from namespace while timeout
101
102 :param str: pod_name
103 :param str: namespace
104 :param list or str: phase
105 :param int: timeout
106
107 :rtype: None
108 """
109 if isinstance(phase, str):
110 phase = [phase]
111
112 def check():
113 return self.get_pod_phase(pod_name, namespace) in phase
114
115 helpers.wait(check, timeout=timeout,
116 timeout_msg='Timeout waiting, pod {pod_name} is not in '
117 '"{phase}" phase'.format(
118 pod_name=pod_name, phase=phase))
119
120 def wait_pods_phase(self, pods, phase, timeout=60):
121 """Wait timeout seconds for phase of pods
122
123 :param pods: list of K8sPod
124 :param phase: list or str
125 :param timeout: int
126
127 :rtype: None
128 """
129 if isinstance(phase, str):
130 phase = [phase]
131
132 def check(pod_name, namespace):
133 return self.get_pod_phase(pod_name, namespace) in phase
134
135 def check_all_pods():
136 return all(check(pod.name, pod.metadata.namespace) for pod in pods)
137
138 helpers.wait(
139 check_all_pods,
140 timeout=timeout,
141 timeout_msg='Timeout waiting, pods {0} are not in "{1}" '
142 'phase'.format([pod.name for pod in pods], phase))
143
144 def check_pod_create(self, body, namespace=None, timeout=300, interval=5):
145 """Check creating sample pod
146
147 :param k8s_pod: V1Pod
148 :param namespace: str
149 :rtype: V1Pod
150 """
151 LOG.info("Creating pod in k8s cluster")
152 LOG.debug(
153 "POD spec to create:\n{}".format(
154 yaml.dump(body, default_flow_style=False))
155 )
156 LOG.debug("Timeout for creation is set to {}".format(timeout))
157 LOG.debug("Checking interval is set to {}".format(interval))
158 pod = self.api.pods.create(body=body, namespace=namespace)
159 pod.wait_running(timeout=300, interval=5)
160 LOG.info("Pod '{0}' is created in '{1}' namespace".format(
161 pod.name, pod.namespace))
162 return self.api.pods.get(name=pod.name, namespace=pod.namespace)
163
164 def wait_pod_deleted(self, podname, timeout=60, interval=5):
165 helpers.wait(
166 lambda: podname not in [pod.name for pod in self.api.pods.list()],
167 timeout=timeout,
168 interval=interval,
169 timeout_msg="Pod deletion timeout reached!"
170 )
171
172 def check_pod_delete(self, k8s_pod, timeout=300, interval=5,
173 namespace=None):
174 """Deleting pod from k8s
175
176 :param k8s_pod: tcp_tests.managers.k8s.nodes.K8sNode
177 :param k8sclient: tcp_tests.managers.k8s.cluster.K8sCluster
178 """
179 LOG.info("Deleting pod '{}'".format(k8s_pod.name))
180 LOG.debug("Pod status:\n{}".format(k8s_pod.status))
181 LOG.debug("Timeout for deletion is set to {}".format(timeout))
182 LOG.debug("Checking interval is set to {}".format(interval))
183 self.api.pods.delete(body=k8s_pod, name=k8s_pod.name,
184 namespace=namespace)
185 self.wait_pod_deleted(k8s_pod.name, timeout, interval)
186 LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
187
188 def check_service_create(self, body, namespace=None):
189 """Check creating k8s service
190
191 :param body: dict, service spec
192 :param namespace: str
193 :rtype: K8sService object
194 """
195 LOG.info("Creating service in k8s cluster")
196 LOG.debug(
197 "Service spec to create:\n{}".format(
198 yaml.dump(body, default_flow_style=False))
199 )
200 service = self.api.services.create(body=body, namespace=namespace)
201 LOG.info("Service '{0}' is created in '{1}' namespace".format(
202 service.name, service.namespace))
203 return self.api.services.get(name=service.name,
204 namespace=service.namespace)
205
206 def check_ds_create(self, body, namespace=None):
207 """Check creating k8s DaemonSet
208
209 :param body: dict, DaemonSet spec
210 :param namespace: str
211 :rtype: K8sDaemonSet object
212 """
213 LOG.info("Creating DaemonSet in k8s cluster")
214 LOG.debug(
215 "DaemonSet spec to create:\n{}".format(
216 yaml.dump(body, default_flow_style=False))
217 )
218 ds = self.api.daemonsets.create(body=body, namespace=namespace)
219 LOG.info("DaemonSet '{0}' is created in '{1}' namespace".format(
220 ds.name, ds.namespace))
221 return self.api.daemonsets.get(name=ds.name, namespace=ds.namespace)
222
223 def check_ds_ready(self, dsname, namespace=None):
224 """Check if k8s DaemonSet is ready
225
226 :param dsname: str, ds name
227 :return: bool
228 """
229 ds = self.api.daemonsets.get(name=dsname, namespace=namespace)
230 return (ds.status.current_number_scheduled ==
231 ds.status.desired_number_scheduled)
232
233 def wait_ds_ready(self, dsname, namespace=None, timeout=60, interval=5):
234 """Wait until all pods are scheduled on nodes
235
236 :param dsname: str, ds name
237 :param timeout: int
238 :param interval: int
239 """
240 helpers.wait(
241 lambda: self.check_ds_ready(dsname, namespace=namespace),
242 timeout=timeout, interval=interval)
243
Artem Panchenko501e67e2017-06-14 14:59:18 +0300244 def check_deploy_create(self, body, namespace=None):
245 """Check creating k8s Deployment
246
247 :param body: dict, Deployment spec
248 :param namespace: str
249 :rtype: K8sDeployment object
250 """
251 LOG.info("Creating Deployment in k8s cluster")
252 LOG.debug(
253 "Deployment spec to create:\n{}".format(
254 yaml.dump(body, default_flow_style=False))
255 )
256 deploy = self.api.deployments.create(body=body, namespace=namespace)
257 LOG.info("Deployment '{0}' is created in '{1}' namespace".format(
258 deploy.name, deploy.namespace))
259 return self.api.deployments.get(name=deploy.name,
260 namespace=deploy.namespace)
261
262 def check_deploy_ready(self, deploy_name, namespace=None):
263 """Check if k8s Deployment is ready
264
265 :param deploy_name: str, deploy name
266 :return: bool
267 """
Dina Belovae6fdffb2017-09-19 13:58:34 -0700268 deploy = self.api.deployments.get(name=deploy_name,
269 namespace=namespace)
Artem Panchenko501e67e2017-06-14 14:59:18 +0300270 return deploy.status.available_replicas == deploy.status.replicas
271
Dina Belovae6fdffb2017-09-19 13:58:34 -0700272 def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60,
273 interval=5):
Artem Panchenko501e67e2017-06-14 14:59:18 +0300274 """Wait until all pods are scheduled on nodes
275
276 :param deploy_name: str, deploy name
277 :param timeout: int
278 :param interval: int
279 """
280 helpers.wait(
281 lambda: self.check_deploy_ready(deploy_name, namespace=namespace),
282 timeout=timeout, interval=interval)
283
Artem Panchenko0594cd72017-06-12 13:25:26 +0300284 def check_namespace_create(self, name):
285 """Check creating k8s Namespace
286
287 :param name: str
288 :rtype: K8sNamespace object
289 """
Sergey Vasilenkofd1fd612017-09-20 13:09:51 +0300290 try:
291 ns = self.api.namespaces.get(name=name)
292 LOG.info("Namespace '{0}' is already exists".format(ns.name))
293 except ApiException as e:
Dennis Dmitriev9b02c8b2017-11-13 15:31:35 +0200294 if hasattr(e, "status") and 404 == e.status:
295 LOG.info("Creating Namespace in k8s cluster")
296 ns = self.api.namespaces.create(
297 body={'metadata': {'name': name}})
298 LOG.info("Namespace '{0}' is created".format(ns.name))
299 # wait 10 seconds until a token for new service account
300 # is created
301 time.sleep(10)
302 ns = self.api.namespaces.get(name=ns.name)
303 else:
304 raise
Sergey Vasilenkofd1fd612017-09-20 13:09:51 +0300305 return ns
Artem Panchenko0594cd72017-06-12 13:25:26 +0300306
307 def create_objects(self, path):
308 if isinstance(path, str):
309 path = [path]
310 params = ' '.join(["-f {}".format(p) for p in path])
311 cmd = 'kubectl create {params}'.format(params=params)
312 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400313 node_name=self.ctl_host) as remote:
Artem Panchenko0594cd72017-06-12 13:25:26 +0300314 LOG.info("Running command '{cmd}' on node {node}".format(
315 cmd=cmd,
316 node=remote.hostname)
317 )
318 result = remote.check_call(cmd)
319 LOG.info(result['stdout'])
320
321 def get_running_pods(self, pod_name, namespace=None):
322 pods = [pod for pod in self.api.pods.list(namespace=namespace)
323 if (pod_name in pod.name and pod.status.phase == 'Running')]
324 return pods
325
326 def get_pods_number(self, pod_name, namespace=None):
327 pods = self.get_running_pods(pod_name, namespace)
328 return len(pods)
329
330 def get_running_pods_by_ssh(self, pod_name, namespace=None):
331 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400332 node_name=self.ctl_host) as remote:
Artem Panchenko0594cd72017-06-12 13:25:26 +0300333 result = remote.check_call("kubectl get pods --namespace {} |"
334 " grep {} | awk '{{print $1 \" \""
335 " $3}}'".format(namespace,
336 pod_name))['stdout']
337 running_pods = [data.strip().split()[0] for data in result
338 if data.strip().split()[1] == 'Running']
339 return running_pods
340
341 def get_pods_restarts(self, pod_name, namespace=None):
342 pods = [pod.status.container_statuses[0].restart_count
343 for pod in self.get_running_pods(pod_name, namespace)]
344 return sum(pods)
vrovacheva9d08332017-06-22 20:01:59 +0400345
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400346 def run_conformance(self, timeout=60 * 60, log_out='k8s_conformance.log',
Vladimir Jigulinee1faa52018-06-25 13:00:51 +0400347 raise_on_err=True, node_name=None,
348 api_server='http://127.0.0.1:8080'):
349 if node_name is None:
350 node_name = self.ctl_host
351 cmd = "set -o pipefail; docker run --net=host -e API_SERVER="\
352 "'{api}' {image} | tee '{log}'".format(
353 api=api_server, image=self.__config.k8s.k8s_conformance_image,
354 log=log_out)
355 return self.__underlay.check_call(
356 cmd=cmd, node_name=node_name, timeout=timeout,
357 raise_on_err=raise_on_err)
Artem Panchenko501e67e2017-06-14 14:59:18 +0300358
359 def get_k8s_masters(self):
360 k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master',
361 pillar='linux:network:fqdn')
362 return [self._K8SManager__underlay.host_by_node_name(node_name=v)
363 for pillar in k8s_masters_fqdn for k, v in pillar.items()]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400364
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400365 def kubectl_run(self, name, image, port, replicas=None):
366 cmd = "kubectl run {0} --image={1} --port={2}".format(
367 name, image, port)
368 if replicas is not None:
369 cmd += " --replicas={}".format(replicas)
370 return self.__underlay.check_call(cmd=cmd, node_name=self.ctl_host)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400371
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400372 def kubectl_expose(self, resource, name, port, type, target_name=None):
373 cmd = "kubectl expose {0} {1} --port={2} --type={3}".format(
374 resource, name, port, type)
375 if target_name is not None:
376 cmd += " --name={}".format(target_name)
377 return self.__underlay.check_call(cmd=cmd, node_name=self.ctl_host)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400378
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400379 def kubectl_annotate(self, resource, name, annotation):
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400380 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400381 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400382 result = remote.check_call(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400383 "kubectl annotate {0} {1} {2}".format(
384 resource, name, annotation
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400385 )
386 )
387 return result
388
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400389 def get_svc_ip(self, name, namespace='kube-system', external=False):
390 cmd = "kubectl get svc {0} -n {1} | awk '{{print ${2}}}' | tail -1".\
391 format(name, namespace, 4 if external else 3)
392 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
393 return result['stdout'][0].strip()
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400394
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400395 @retry(300, exception=DevopsCalledProcessError)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400396 def nslookup(self, host, src):
397 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400398 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400399 remote.check_call("nslookup {0} {1}".format(host, src))
400
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400401 @retry(300, exception=DevopsCalledProcessError)
402 def curl(self, url):
403 """
404 Run curl on controller and return stdout
405
406 :param url: url to curl
407 :return: response string
408 """
409 with self.__underlay.remote(node_name=self.ctl_host) as r:
410 return r.check_call("curl -s -S \"{}\"".format(url))['stdout']
411
Victor Ryzhenkin3ffa2b42017-10-05 16:38:44 +0400412# ---------------------------- Virtlet methods -------------------------------
413 def install_jq(self):
414 """Install JQuery on node. Required for changing yamls on the fly.
415
416 :return:
417 """
418 cmd = "apt install jq -y"
419 return self.__underlay.check_call(cmd, node_name=self.ctl_host)
420
Victor Ryzhenkin3ffa2b42017-10-05 16:38:44 +0400421 def git_clone(self, project, target):
422 cmd = "git clone {0} {1}".format(project, target)
423 return self.__underlay.check_call(cmd, node_name=self.ctl_host)
424
425 def run_vm(self, name=None, yaml_path='~/virtlet/examples/cirros-vm.yaml'):
426 if not name:
427 name = 'virtlet-vm-{}'.format(uuid4())
428 cmd = (
429 "kubectl convert -f {0} --local "
430 "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -")
431 self.__underlay.check_call(cmd.format(yaml_path, name),
432 node_name=self.ctl_host)
433 return name
434
435 def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
436 cmd = "kubectl get po {} -n default".format(name)
437 if jsonpath:
438 cmd += " -o jsonpath={}".format(jsonpath)
439 return self.__underlay.check_call(
440 cmd, node_name=self.ctl_host, expected=expected)
441
442 def wait_active_state(self, name, timeout=180):
443 helpers.wait(
444 lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
445 timeout=timeout,
446 timeout_msg="VM {} didn't Running state in {} sec. "
447 "Current state: ".format(
448 name, timeout, self.get_vm_info(name)['stdout'][0]))
449
450 def delete_vm(self, name, timeout=180):
451 cmd = "kubectl delete po -n default {}".format(name)
452 self.__underlay.check_call(cmd, node_name=self.ctl_host)
453
454 helpers.wait(
455 lambda:
456 "Error from server (NotFound):" in
457 " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
458 timeout=timeout,
459 timeout_msg="VM {} didn't Running state in {} sec. "
460 "Current state: ".format(
461 name, timeout, self.get_vm_info(name)['stdout'][0]))
462
463 def adjust_cirros_resources(
464 self, cpu=2, memory='256',
465 target_yaml='virtlet/examples/cirros-vm-exp.yaml'):
466 # We will need to change params in case of example change
467 cmd = ("cd ~/virtlet/examples && "
468 "cp cirros-vm.yaml {2} && "
469 "sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/ "
470 "\1VirtletVCPUCount: \"{0}\"/' {2} && "
471 "sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: "
472 "{1}Mi/' {2}".format(cpu, memory, target_yaml))
473 self.__underlay.check_call(cmd, node_name=self.ctl_host)
474
475 def get_domain_name(self, vm_name):
476 cmd = ("~/virtlet/examples/virsh.sh list --name | "
477 "grep -i {0} ".format(vm_name))
478 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
479 return result['stdout'].strip()
480
481 def get_vm_cpu_count(self, domain_name):
482 cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
483 "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name))
484 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
485 return int(result['stdout'].strip())
486
487 def get_vm_memory_count(self, domain_name):
488 cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
489 "grep 'memory unit' | "
490 "grep -o '[[:digit:]]*'".format(domain_name))
491 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
492 return int(result['stdout'].strip())
493
494 def get_domain_id(self, domain_name):
495 cmd = ("virsh dumpxml {} | grep id=\' | "
496 "grep -o [[:digit:]]*".format(domain_name))
497 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
498 return int(result['stdout'].strip())
499
500 def list_vm_volumes(self, domain_name):
501 domain_id = self.get_domain_id(domain_name)
502 cmd = ("~/virtlet/examples/virsh.sh domblklist {} | "
503 "tail -n +3 | awk {{'print $2'}}".format(domain_id))
504 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
Dennis Dmitriev9b02c8b2017-11-13 15:31:35 +0200505 return result['stdout'].strip()
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400506
Victor Ryzhenkinac37a752018-02-21 17:55:45 +0400507 def run_virtlet_conformance(self, timeout=60 * 120,
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400508 log_file='virtlet_conformance.log'):
509 if self.__config.k8s.run_extended_virtlet_conformance:
510 ci_image = "cloud-images.ubuntu.com/xenial/current/" \
511 "xenial-server-cloudimg-amd64-disk1.img"
512 cmd = ("set -o pipefail; "
513 "docker run --net=host {0} /virtlet-e2e-tests "
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400514 "-include-cloud-init-tests -junitOutput report.xml "
515 "-image {2} -sshuser ubuntu -memoryLimit 1024 "
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400516 "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
517 "-ginkgo.focus '\[Conformance\]' "
518 "| tee {1}".format(
519 self.__config.k8s_deploy.kubernetes_virtlet_image,
520 log_file, ci_image))
521 else:
522 cmd = ("set -o pipefail; "
523 "docker run --net=host {0} /virtlet-e2e-tests "
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400524 "-junitOutput report.xml "
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400525 "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
526 "-ginkgo.focus '\[Conformance\]' "
527 "| tee {1}".format(
528 self.__config.k8s_deploy.kubernetes_virtlet_image,
529 log_file))
530 LOG.info("Executing: {}".format(cmd))
531 with self.__underlay.remote(
532 node_name=self.ctl_host) as remote:
533 result = remote.check_call(cmd, timeout=timeout)
534 stderr = result['stderr']
535 stdout = result['stdout']
536 LOG.info("Test results stdout: {}".format(stdout))
537 LOG.info("Test results stderr: {}".format(stderr))
538 return result
539
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400540 def start_k8s_cncf_verification(self, timeout=60 * 90):
541 cncf_cmd = ("curl -L https://raw.githubusercontent.com/cncf/"
542 "k8s-conformance/master/sonobuoy-conformance.yaml"
543 " | kubectl apply -f -")
544 with self.__underlay.remote(
545 node_name=self.ctl_host) as remote:
546 remote.check_call(cncf_cmd, timeout=60)
547 self.wait_pod_phase('sonobuoy', 'Running',
548 namespace='sonobuoy', timeout=120)
549 wait_cmd = ('kubectl logs -n sonobuoy sonobuoy | '
550 'grep "sonobuoy is now blocking"')
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400551
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400552 expected = [0, 1]
553 helpers.wait(
554 lambda: remote.check_call(
555 wait_cmd, expected=expected).exit_code == 0,
556 interval=30, timeout=timeout,
557 timeout_msg="Timeout for CNCF reached."
558 )
559
560 def extract_file_to_node(self, system='docker',
561 container='virtlet',
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400562 file_path='report.xml',
563 out_dir='.',
564 **kwargs):
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400565 """
566 Download file from docker or k8s container to node
567
568 :param system: docker or k8s
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400569 :param container: Full name of part of name
570 :param file_path: File path in container
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400571 :param kwargs: Used to control pod and namespace
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400572 :param out_dir: Output directory
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400573 :return:
574 """
575 with self.__underlay.remote(
576 node_name=self.ctl_host) as remote:
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400577 if system is 'docker':
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400578 cmd = ("docker ps --all | grep \"{0}\" |"
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400579 " awk '{{print $1}}'".format(container))
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400580 result = remote.check_call(cmd, raise_on_err=False)
581 if result['stdout']:
582 container_id = result['stdout'][0].strip()
583 else:
584 LOG.info('No container found, skipping extraction...')
585 return
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400586 cmd = "docker start {}".format(container_id)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400587 remote.check_call(cmd, raise_on_err=False)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400588 cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
589 container_id, file_path, out_dir)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400590 remote.check_call(cmd, raise_on_err=False)
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400591 else:
592 # system is k8s
593 pod_name = kwargs.get('pod_name')
594 pod_namespace = kwargs.get('pod_namespace')
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400595 cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
596 pod_namespace, pod_name, file_path, out_dir)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400597 remote.check_call(cmd, raise_on_err=False)
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400598
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400599 def download_k8s_logs(self, files):
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400600 """
601 Download JUnit report and conformance logs from cluster
602 :param files:
603 :return:
604 """
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400605 master_host = self.__config.salt.salt_master_host
606 with self.__underlay.remote(host=master_host) as r:
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400607 for log_file in files:
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400608 cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
609 self.ctl_host, log_file)
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400610 r.check_call(cmd, raise_on_err=False)
611 LOG.info("Downloading the artifact {0}".format(log_file))
612 r.download(destination=log_file, target=os.getcwd())
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400613
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400614 def combine_xunit(self, path, output):
615 """
616 Function to combine multiple xmls with test results to
617 one.
618
619 :param path: Path where xmls to combine located
620 :param output: Path to xml file where output will stored
621 :return:
622 """
623 with self.__underlay.remote(node_name=self.ctl_host) as r:
Tatyana Leontovichfe1834c2018-04-19 13:52:05 +0300624 cmd = ("apt-get install python-setuptools -y; "
Vladimir Jigulin3c1ea6c2018-07-19 13:59:05 +0400625 "pip install "
626 "https://github.com/mogaika/xunitmerge/archive/master.zip")
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400627 LOG.debug('Installing xunitmerge')
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400628 r.check_call(cmd, raise_on_err=False)
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400629 LOG.debug('Merging xunit')
630 cmd = ("cd {0}; arg = ''; "
631 "for i in $(ls | grep xml); "
632 "do arg=\"$arg $i\"; done && "
633 "xunitmerge $arg {1}".format(path, output))
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400634 r.check_call(cmd, raise_on_err=False)
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400635
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400636 def manage_cncf_archive(self):
637 """
638 Function to untar archive, move files, that we are needs to the
639 home folder, prepare it to downloading and clean the trash.
640 Will generate files: e2e.log, junit_01.xml, cncf_results.tar.gz
641 and version.txt
642 :return:
643 """
644
645 # Namespace and pod name may be hardcoded since this function is
646 # very specific for cncf and cncf is not going to change
647 # those launch pod name and namespace.
648 get_tar_name_cmd = ("kubectl logs -n sonobuoy sonobuoy | "
649 "grep 'Results available' | "
650 "sed 's/.*\///' | tr -d '\"'")
651
652 with self.__underlay.remote(
653 node_name=self.ctl_host) as remote:
654 tar_name = remote.check_call(get_tar_name_cmd)['stdout'][0].strip()
655 untar = "mkdir result && tar -C result -xzf {0}".format(tar_name)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400656 remote.check_call(untar, raise_on_err=False)
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400657 manage_results = ("mv result/plugins/e2e/results/e2e.log . && "
658 "mv result/plugins/e2e/results/junit_01.xml . ;"
659 "kubectl version > version.txt")
660 remote.check_call(manage_results, raise_on_err=False)
661 cleanup_host = "rm -rf result"
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400662 remote.check_call(cleanup_host, raise_on_err=False)
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400663 # This one needed to use download fixture, since I don't know
664 # how possible apply fixture arg dynamically from test.
665 rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400666 remote.check_call(rename_tar, raise_on_err=False)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400667
668 def update_k8s_images(self, tag):
669 """
670 Update k8s images tag version in cluster meta and apply required
671 for update states
672
673 :param tag: New version tag of k8s images
674 :return:
675 """
676 master_host = self.__config.salt.salt_master_host
677
678 def update_image_tag_meta(config, image_name):
679 image_old = config.get(image_name)
680 image_base = image_old.split(':')[0]
681 image_new = "{}:{}".format(image_base, tag)
682 LOG.info("Changing k8s '{0}' image cluster meta to '{1}'".format(
683 image_name, image_new))
684
685 with self.__underlay.remote(host=master_host) as r:
686 cmd = "salt-call reclass.cluster_meta_set" \
687 " name={0} value={1}".format(image_name, image_new)
688 r.check_call(cmd)
689 return image_new
690
691 cfg = self.__config
692
693 update_image_tag_meta(cfg.k8s_deploy, "kubernetes_hyperkube_image")
694 update_image_tag_meta(cfg.k8s_deploy, "kubernetes_pause_image")
695 cfg.k8s.k8s_conformance_image = update_image_tag_meta(
696 cfg.k8s, "k8s_conformance_image")
697
698 steps_path = cfg.k8s_deploy.k8s_update_steps_path
699 update_commands = self.__underlay.read_template(steps_path)
700 self.execute_commands(
701 update_commands, label="Updating kubernetes to '{}'".format(tag))
Vladimir Jigulinee1faa52018-06-25 13:00:51 +0400702
703 def get_keepalived_vip(self):
704 """
705 Return k8s VIP IP address
706
707 :return: str, IP address
708 """
709 ctl_vip_pillar = self._salt.get_pillar(
710 tgt="I@kubernetes:control:enabled:True",
711 pillar="_param:cluster_vip_address")[0]
712 return [vip for minion_id, vip in ctl_vip_pillar.items()][0]
Vladimir Jigulina6b018b2018-07-18 15:19:01 +0400713
714 def get_sample_deployment(self, name, **kwargs):
715 return K8SSampleDeployment(self, name, **kwargs)
716
717
class K8SSampleDeployment:
    """Helper around ``kubectl run``/``kubectl expose`` for the sample
    node-hello deployment used in service availability checks."""

    def __init__(self, manager, name, replicas=2,
                 image='gcr.io/google-samples/node-hello:1.0', port=8080):
        # Keep a reference to the K8SManager that executes kubectl calls.
        self.manager = manager
        self.name = name
        self.image = image
        self.port = port
        self.replicas = replicas

    def run(self):
        """Create the deployment on the cluster via ``kubectl run``."""
        mgr = self.manager
        mgr.kubectl_run(self.name, self.image, self.port,
                        replicas=self.replicas)

    def expose(self, service_type='ClusterIP', target_name=None):
        """Expose the deployment as a service of the given type."""
        self.manager.kubectl_expose(
            'deployment', self.name, self.port, service_type, target_name)

    def get_svc_ip(self, external=False):
        """Return the service's cluster (or external) IP address."""
        return self.manager.get_svc_ip(self.name, namespace='default',
                                       external=external)

    def curl(self, external=False):
        """Fetch the service root URL; via requests when external,
        otherwise via curl on the controller node."""
        target_ip = self.get_svc_ip(external=external)
        url = "http://{0}:{1}".format(target_ip, self.port)
        if external:
            return requests.get(url).text
        return self.manager.curl(url)

    def is_service_available(self, external=False):
        """True when the sample service answers with its hello banner."""
        body = self.curl(external=external)
        return "Hello Kubernetes!" in body

    def wait_for_ready(self):
        """Block until all deployment replicas are available."""
        return self.manager.wait_deploy_ready(self.name)