blob: 38521c7489591be5f30e115255c43b3b01c7e974 [file] [log] [blame]
Artem Panchenko0594cd72017-06-12 13:25:26 +03001# Copyright 2017 Mirantis, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
14
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +040015import os
Artem Panchenko0594cd72017-06-12 13:25:26 +030016import time
Victor Ryzhenkin3ffa2b42017-10-05 16:38:44 +040017from uuid import uuid4
Artem Panchenko0594cd72017-06-12 13:25:26 +030018
19import yaml
20
21from devops.helpers import helpers
Victor Ryzhenkin66d39372017-09-28 19:25:48 +040022from devops.error import DevopsCalledProcessError
Artem Panchenko0594cd72017-06-12 13:25:26 +030023
24from tcp_tests import logger
Victor Ryzhenkin66d39372017-09-28 19:25:48 +040025from tcp_tests.helpers import ext
26from tcp_tests.helpers.utils import retry
Artem Panchenko0594cd72017-06-12 13:25:26 +030027from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
28from tcp_tests.managers.k8s import cluster
Sergey Vasilenkofd1fd612017-09-20 13:09:51 +030029from k8sclient.client.rest import ApiException
Artem Panchenko0594cd72017-06-12 13:25:26 +030030
# Module-wide logger instance shared by all methods in this manager.
LOG = logger.logger
32
33
class K8SManager(ExecuteCommandsMixin):
    """Manager for the Kubernetes part of a deployed environment."""

    __config = None
    __underlay = None

    def __init__(self, config, underlay, salt):
        self.__config = config
        self.__underlay = underlay
        self._salt = salt
        # Lazily created by the `api` property on first access.
        self._api_client = None
        super(K8SManager, self).__init__(config=config, underlay=underlay)
47
48 def install(self, commands):
49 self.execute_commands(commands,
50 label='Install Kubernetes services')
51 self.__config.k8s.k8s_installed = True
52 self.__config.k8s.kube_host = self.get_proxy_api()
53
54 def get_proxy_api(self):
55 k8s_proxy_ip_pillars = self._salt.get_pillar(
vrovachev99228d32017-06-08 19:46:10 +040056 tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
Artem Panchenko0594cd72017-06-12 13:25:26 +030057 pillar='haproxy:proxy:listen:k8s_secure:binds:address')
vrovachev99228d32017-06-08 19:46:10 +040058 k8s_hosts = self._salt.get_pillar(
59 tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
60 pillar='kubernetes:pool:apiserver:host')
Artem Panchenko0594cd72017-06-12 13:25:26 +030061 k8s_proxy_ip = set([ip
62 for item in k8s_proxy_ip_pillars
Dina Belovae6fdffb2017-09-19 13:58:34 -070063 for node, ip in item.items() if ip])
vrovachev99228d32017-06-08 19:46:10 +040064 k8s_hosts = set([ip
Dina Belovae6fdffb2017-09-19 13:58:34 -070065 for item in k8s_hosts
66 for node, ip in item.items() if ip])
vrovachev99228d32017-06-08 19:46:10 +040067 assert len(k8s_hosts) == 1, (
68 "Found more than one Kubernetes API hosts in pillars:{0}, "
69 "expected one!").format(k8s_hosts)
70 k8s_host = k8s_hosts.pop()
71 assert k8s_host in k8s_proxy_ip, (
72 "Kubernetes API host:{0} not found in proxies:{} "
73 "on k8s master nodes. K8s proxies are expected on "
74 "nodes with K8s master").format(k8s_host, k8s_proxy_ip)
75 return k8s_host
Artem Panchenko0594cd72017-06-12 13:25:26 +030076
77 @property
78 def api(self):
79 if self._api_client is None:
80 self._api_client = cluster.K8sCluster(
81 user=self.__config.k8s_deploy.kubernetes_admin_user,
82 password=self.__config.k8s_deploy.kubernetes_admin_password,
83 host=self.__config.k8s.kube_host,
84 port=self.__config.k8s.kube_apiserver_port,
85 default_namespace='default')
86 return self._api_client
87
Victor Ryzhenkin66d39372017-09-28 19:25:48 +040088 @property
89 def ctl_host(self):
90 nodes = [node for node in self.__config.underlay.ssh if
91 ext.UNDERLAY_NODE_ROLES.k8s_controller in node['roles']]
92 return nodes[0]['node_name']
93
Artem Panchenko0594cd72017-06-12 13:25:26 +030094 def get_pod_phase(self, pod_name, namespace=None):
95 return self.api.pods.get(
96 name=pod_name, namespace=namespace).phase
97
98 def wait_pod_phase(self, pod_name, phase, namespace=None, timeout=60):
99 """Wait phase of pod_name from namespace while timeout
100
101 :param str: pod_name
102 :param str: namespace
103 :param list or str: phase
104 :param int: timeout
105
106 :rtype: None
107 """
108 if isinstance(phase, str):
109 phase = [phase]
110
111 def check():
112 return self.get_pod_phase(pod_name, namespace) in phase
113
114 helpers.wait(check, timeout=timeout,
115 timeout_msg='Timeout waiting, pod {pod_name} is not in '
116 '"{phase}" phase'.format(
117 pod_name=pod_name, phase=phase))
118
119 def wait_pods_phase(self, pods, phase, timeout=60):
120 """Wait timeout seconds for phase of pods
121
122 :param pods: list of K8sPod
123 :param phase: list or str
124 :param timeout: int
125
126 :rtype: None
127 """
128 if isinstance(phase, str):
129 phase = [phase]
130
131 def check(pod_name, namespace):
132 return self.get_pod_phase(pod_name, namespace) in phase
133
134 def check_all_pods():
135 return all(check(pod.name, pod.metadata.namespace) for pod in pods)
136
137 helpers.wait(
138 check_all_pods,
139 timeout=timeout,
140 timeout_msg='Timeout waiting, pods {0} are not in "{1}" '
141 'phase'.format([pod.name for pod in pods], phase))
142
143 def check_pod_create(self, body, namespace=None, timeout=300, interval=5):
144 """Check creating sample pod
145
146 :param k8s_pod: V1Pod
147 :param namespace: str
148 :rtype: V1Pod
149 """
150 LOG.info("Creating pod in k8s cluster")
151 LOG.debug(
152 "POD spec to create:\n{}".format(
153 yaml.dump(body, default_flow_style=False))
154 )
155 LOG.debug("Timeout for creation is set to {}".format(timeout))
156 LOG.debug("Checking interval is set to {}".format(interval))
157 pod = self.api.pods.create(body=body, namespace=namespace)
158 pod.wait_running(timeout=300, interval=5)
159 LOG.info("Pod '{0}' is created in '{1}' namespace".format(
160 pod.name, pod.namespace))
161 return self.api.pods.get(name=pod.name, namespace=pod.namespace)
162
163 def wait_pod_deleted(self, podname, timeout=60, interval=5):
164 helpers.wait(
165 lambda: podname not in [pod.name for pod in self.api.pods.list()],
166 timeout=timeout,
167 interval=interval,
168 timeout_msg="Pod deletion timeout reached!"
169 )
170
171 def check_pod_delete(self, k8s_pod, timeout=300, interval=5,
172 namespace=None):
173 """Deleting pod from k8s
174
175 :param k8s_pod: tcp_tests.managers.k8s.nodes.K8sNode
176 :param k8sclient: tcp_tests.managers.k8s.cluster.K8sCluster
177 """
178 LOG.info("Deleting pod '{}'".format(k8s_pod.name))
179 LOG.debug("Pod status:\n{}".format(k8s_pod.status))
180 LOG.debug("Timeout for deletion is set to {}".format(timeout))
181 LOG.debug("Checking interval is set to {}".format(interval))
182 self.api.pods.delete(body=k8s_pod, name=k8s_pod.name,
183 namespace=namespace)
184 self.wait_pod_deleted(k8s_pod.name, timeout, interval)
185 LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
186
187 def check_service_create(self, body, namespace=None):
188 """Check creating k8s service
189
190 :param body: dict, service spec
191 :param namespace: str
192 :rtype: K8sService object
193 """
194 LOG.info("Creating service in k8s cluster")
195 LOG.debug(
196 "Service spec to create:\n{}".format(
197 yaml.dump(body, default_flow_style=False))
198 )
199 service = self.api.services.create(body=body, namespace=namespace)
200 LOG.info("Service '{0}' is created in '{1}' namespace".format(
201 service.name, service.namespace))
202 return self.api.services.get(name=service.name,
203 namespace=service.namespace)
204
205 def check_ds_create(self, body, namespace=None):
206 """Check creating k8s DaemonSet
207
208 :param body: dict, DaemonSet spec
209 :param namespace: str
210 :rtype: K8sDaemonSet object
211 """
212 LOG.info("Creating DaemonSet in k8s cluster")
213 LOG.debug(
214 "DaemonSet spec to create:\n{}".format(
215 yaml.dump(body, default_flow_style=False))
216 )
217 ds = self.api.daemonsets.create(body=body, namespace=namespace)
218 LOG.info("DaemonSet '{0}' is created in '{1}' namespace".format(
219 ds.name, ds.namespace))
220 return self.api.daemonsets.get(name=ds.name, namespace=ds.namespace)
221
222 def check_ds_ready(self, dsname, namespace=None):
223 """Check if k8s DaemonSet is ready
224
225 :param dsname: str, ds name
226 :return: bool
227 """
228 ds = self.api.daemonsets.get(name=dsname, namespace=namespace)
229 return (ds.status.current_number_scheduled ==
230 ds.status.desired_number_scheduled)
231
232 def wait_ds_ready(self, dsname, namespace=None, timeout=60, interval=5):
233 """Wait until all pods are scheduled on nodes
234
235 :param dsname: str, ds name
236 :param timeout: int
237 :param interval: int
238 """
239 helpers.wait(
240 lambda: self.check_ds_ready(dsname, namespace=namespace),
241 timeout=timeout, interval=interval)
242
Artem Panchenko501e67e2017-06-14 14:59:18 +0300243 def check_deploy_create(self, body, namespace=None):
244 """Check creating k8s Deployment
245
246 :param body: dict, Deployment spec
247 :param namespace: str
248 :rtype: K8sDeployment object
249 """
250 LOG.info("Creating Deployment in k8s cluster")
251 LOG.debug(
252 "Deployment spec to create:\n{}".format(
253 yaml.dump(body, default_flow_style=False))
254 )
255 deploy = self.api.deployments.create(body=body, namespace=namespace)
256 LOG.info("Deployment '{0}' is created in '{1}' namespace".format(
257 deploy.name, deploy.namespace))
258 return self.api.deployments.get(name=deploy.name,
259 namespace=deploy.namespace)
260
261 def check_deploy_ready(self, deploy_name, namespace=None):
262 """Check if k8s Deployment is ready
263
264 :param deploy_name: str, deploy name
265 :return: bool
266 """
Dina Belovae6fdffb2017-09-19 13:58:34 -0700267 deploy = self.api.deployments.get(name=deploy_name,
268 namespace=namespace)
Artem Panchenko501e67e2017-06-14 14:59:18 +0300269 return deploy.status.available_replicas == deploy.status.replicas
270
Dina Belovae6fdffb2017-09-19 13:58:34 -0700271 def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60,
272 interval=5):
Artem Panchenko501e67e2017-06-14 14:59:18 +0300273 """Wait until all pods are scheduled on nodes
274
275 :param deploy_name: str, deploy name
276 :param timeout: int
277 :param interval: int
278 """
279 helpers.wait(
280 lambda: self.check_deploy_ready(deploy_name, namespace=namespace),
281 timeout=timeout, interval=interval)
282
Artem Panchenko0594cd72017-06-12 13:25:26 +0300283 def check_namespace_create(self, name):
284 """Check creating k8s Namespace
285
286 :param name: str
287 :rtype: K8sNamespace object
288 """
Sergey Vasilenkofd1fd612017-09-20 13:09:51 +0300289 try:
290 ns = self.api.namespaces.get(name=name)
291 LOG.info("Namespace '{0}' is already exists".format(ns.name))
292 except ApiException as e:
Dennis Dmitriev9b02c8b2017-11-13 15:31:35 +0200293 if hasattr(e, "status") and 404 == e.status:
294 LOG.info("Creating Namespace in k8s cluster")
295 ns = self.api.namespaces.create(
296 body={'metadata': {'name': name}})
297 LOG.info("Namespace '{0}' is created".format(ns.name))
298 # wait 10 seconds until a token for new service account
299 # is created
300 time.sleep(10)
301 ns = self.api.namespaces.get(name=ns.name)
302 else:
303 raise
Sergey Vasilenkofd1fd612017-09-20 13:09:51 +0300304 return ns
Artem Panchenko0594cd72017-06-12 13:25:26 +0300305
306 def create_objects(self, path):
307 if isinstance(path, str):
308 path = [path]
309 params = ' '.join(["-f {}".format(p) for p in path])
310 cmd = 'kubectl create {params}'.format(params=params)
311 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400312 node_name=self.ctl_host) as remote:
Artem Panchenko0594cd72017-06-12 13:25:26 +0300313 LOG.info("Running command '{cmd}' on node {node}".format(
314 cmd=cmd,
315 node=remote.hostname)
316 )
317 result = remote.check_call(cmd)
318 LOG.info(result['stdout'])
319
320 def get_running_pods(self, pod_name, namespace=None):
321 pods = [pod for pod in self.api.pods.list(namespace=namespace)
322 if (pod_name in pod.name and pod.status.phase == 'Running')]
323 return pods
324
325 def get_pods_number(self, pod_name, namespace=None):
326 pods = self.get_running_pods(pod_name, namespace)
327 return len(pods)
328
329 def get_running_pods_by_ssh(self, pod_name, namespace=None):
330 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400331 node_name=self.ctl_host) as remote:
Artem Panchenko0594cd72017-06-12 13:25:26 +0300332 result = remote.check_call("kubectl get pods --namespace {} |"
333 " grep {} | awk '{{print $1 \" \""
334 " $3}}'".format(namespace,
335 pod_name))['stdout']
336 running_pods = [data.strip().split()[0] for data in result
337 if data.strip().split()[1] == 'Running']
338 return running_pods
339
340 def get_pods_restarts(self, pod_name, namespace=None):
341 pods = [pod.status.container_statuses[0].restart_count
342 for pod in self.get_running_pods(pod_name, namespace)]
343 return sum(pods)
vrovacheva9d08332017-06-22 20:01:59 +0400344
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400345 def run_conformance(self, timeout=60 * 60, log_out='k8s_conformance.log',
Vladimir Jigulinee1faa52018-06-25 13:00:51 +0400346 raise_on_err=True, node_name=None,
347 api_server='http://127.0.0.1:8080'):
348 if node_name is None:
349 node_name = self.ctl_host
350 cmd = "set -o pipefail; docker run --net=host -e API_SERVER="\
351 "'{api}' {image} | tee '{log}'".format(
352 api=api_server, image=self.__config.k8s.k8s_conformance_image,
353 log=log_out)
354 return self.__underlay.check_call(
355 cmd=cmd, node_name=node_name, timeout=timeout,
356 raise_on_err=raise_on_err)
Artem Panchenko501e67e2017-06-14 14:59:18 +0300357
358 def get_k8s_masters(self):
359 k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master',
360 pillar='linux:network:fqdn')
361 return [self._K8SManager__underlay.host_by_node_name(node_name=v)
362 for pillar in k8s_masters_fqdn for k, v in pillar.items()]
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400363
364 def kubectl_run(self, name, image, port):
365 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400366 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400367 result = remote.check_call(
368 "kubectl run {0} --image={1} --port={2}".format(
369 name, image, port
370 )
371 )
372 return result
373
374 def kubectl_expose(self, resource, name, port, type):
375 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400376 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400377 result = remote.check_call(
378 "kubectl expose {0} {1} --port={2} --type={3}".format(
379 resource, name, port, type
380 )
381 )
382 return result
383
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400384 def kubectl_annotate(self, resource, name, annotation):
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400385 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400386 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400387 result = remote.check_call(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400388 "kubectl annotate {0} {1} {2}".format(
389 resource, name, annotation
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400390 )
391 )
392 return result
393
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400394 def get_svc_ip(self, name, namespace='kube-system'):
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400395 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400396 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400397 result = remote.check_call(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400398 "kubectl get svc {0} -n {1} | "
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400399 "awk '{{print $3}}' | tail -1".format(name, namespace)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400400 )
401 return result['stdout'][0].strip()
402
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400403 @retry(300, exception=DevopsCalledProcessError)
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400404 def nslookup(self, host, src):
405 with self.__underlay.remote(
Victor Ryzhenkin66d39372017-09-28 19:25:48 +0400406 node_name=self.ctl_host) as remote:
Victor Ryzhenkin14354ac2017-09-27 17:42:30 +0400407 remote.check_call("nslookup {0} {1}".format(host, src))
408
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400409 @retry(300, exception=DevopsCalledProcessError)
410 def curl(self, url):
411 """
412 Run curl on controller and return stdout
413
414 :param url: url to curl
415 :return: response string
416 """
417 with self.__underlay.remote(node_name=self.ctl_host) as r:
418 return r.check_call("curl -s -S \"{}\"".format(url))['stdout']
419
Victor Ryzhenkin3ffa2b42017-10-05 16:38:44 +0400420# ---------------------------- Virtlet methods -------------------------------
421 def install_jq(self):
422 """Install JQuery on node. Required for changing yamls on the fly.
423
424 :return:
425 """
426 cmd = "apt install jq -y"
427 return self.__underlay.check_call(cmd, node_name=self.ctl_host)
428
Victor Ryzhenkin3ffa2b42017-10-05 16:38:44 +0400429 def git_clone(self, project, target):
430 cmd = "git clone {0} {1}".format(project, target)
431 return self.__underlay.check_call(cmd, node_name=self.ctl_host)
432
433 def run_vm(self, name=None, yaml_path='~/virtlet/examples/cirros-vm.yaml'):
434 if not name:
435 name = 'virtlet-vm-{}'.format(uuid4())
436 cmd = (
437 "kubectl convert -f {0} --local "
438 "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -")
439 self.__underlay.check_call(cmd.format(yaml_path, name),
440 node_name=self.ctl_host)
441 return name
442
443 def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
444 cmd = "kubectl get po {} -n default".format(name)
445 if jsonpath:
446 cmd += " -o jsonpath={}".format(jsonpath)
447 return self.__underlay.check_call(
448 cmd, node_name=self.ctl_host, expected=expected)
449
450 def wait_active_state(self, name, timeout=180):
451 helpers.wait(
452 lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
453 timeout=timeout,
454 timeout_msg="VM {} didn't Running state in {} sec. "
455 "Current state: ".format(
456 name, timeout, self.get_vm_info(name)['stdout'][0]))
457
458 def delete_vm(self, name, timeout=180):
459 cmd = "kubectl delete po -n default {}".format(name)
460 self.__underlay.check_call(cmd, node_name=self.ctl_host)
461
462 helpers.wait(
463 lambda:
464 "Error from server (NotFound):" in
465 " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
466 timeout=timeout,
467 timeout_msg="VM {} didn't Running state in {} sec. "
468 "Current state: ".format(
469 name, timeout, self.get_vm_info(name)['stdout'][0]))
470
471 def adjust_cirros_resources(
472 self, cpu=2, memory='256',
473 target_yaml='virtlet/examples/cirros-vm-exp.yaml'):
474 # We will need to change params in case of example change
475 cmd = ("cd ~/virtlet/examples && "
476 "cp cirros-vm.yaml {2} && "
477 "sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/ "
478 "\1VirtletVCPUCount: \"{0}\"/' {2} && "
479 "sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: "
480 "{1}Mi/' {2}".format(cpu, memory, target_yaml))
481 self.__underlay.check_call(cmd, node_name=self.ctl_host)
482
483 def get_domain_name(self, vm_name):
484 cmd = ("~/virtlet/examples/virsh.sh list --name | "
485 "grep -i {0} ".format(vm_name))
486 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
487 return result['stdout'].strip()
488
489 def get_vm_cpu_count(self, domain_name):
490 cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
491 "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name))
492 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
493 return int(result['stdout'].strip())
494
495 def get_vm_memory_count(self, domain_name):
496 cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
497 "grep 'memory unit' | "
498 "grep -o '[[:digit:]]*'".format(domain_name))
499 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
500 return int(result['stdout'].strip())
501
502 def get_domain_id(self, domain_name):
503 cmd = ("virsh dumpxml {} | grep id=\' | "
504 "grep -o [[:digit:]]*".format(domain_name))
505 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
506 return int(result['stdout'].strip())
507
508 def list_vm_volumes(self, domain_name):
509 domain_id = self.get_domain_id(domain_name)
510 cmd = ("~/virtlet/examples/virsh.sh domblklist {} | "
511 "tail -n +3 | awk {{'print $2'}}".format(domain_id))
512 result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
Dennis Dmitriev9b02c8b2017-11-13 15:31:35 +0200513 return result['stdout'].strip()
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400514
Victor Ryzhenkinac37a752018-02-21 17:55:45 +0400515 def run_virtlet_conformance(self, timeout=60 * 120,
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400516 log_file='virtlet_conformance.log'):
517 if self.__config.k8s.run_extended_virtlet_conformance:
518 ci_image = "cloud-images.ubuntu.com/xenial/current/" \
519 "xenial-server-cloudimg-amd64-disk1.img"
520 cmd = ("set -o pipefail; "
521 "docker run --net=host {0} /virtlet-e2e-tests "
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400522 "-include-cloud-init-tests -junitOutput report.xml "
523 "-image {2} -sshuser ubuntu -memoryLimit 1024 "
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400524 "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
525 "-ginkgo.focus '\[Conformance\]' "
526 "| tee {1}".format(
527 self.__config.k8s_deploy.kubernetes_virtlet_image,
528 log_file, ci_image))
529 else:
530 cmd = ("set -o pipefail; "
531 "docker run --net=host {0} /virtlet-e2e-tests "
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400532 "-junitOutput report.xml "
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400533 "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
534 "-ginkgo.focus '\[Conformance\]' "
535 "| tee {1}".format(
536 self.__config.k8s_deploy.kubernetes_virtlet_image,
537 log_file))
538 LOG.info("Executing: {}".format(cmd))
539 with self.__underlay.remote(
540 node_name=self.ctl_host) as remote:
541 result = remote.check_call(cmd, timeout=timeout)
542 stderr = result['stderr']
543 stdout = result['stdout']
544 LOG.info("Test results stdout: {}".format(stdout))
545 LOG.info("Test results stderr: {}".format(stderr))
546 return result
547
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400548 def start_k8s_cncf_verification(self, timeout=60 * 90):
549 cncf_cmd = ("curl -L https://raw.githubusercontent.com/cncf/"
550 "k8s-conformance/master/sonobuoy-conformance.yaml"
551 " | kubectl apply -f -")
552 with self.__underlay.remote(
553 node_name=self.ctl_host) as remote:
554 remote.check_call(cncf_cmd, timeout=60)
555 self.wait_pod_phase('sonobuoy', 'Running',
556 namespace='sonobuoy', timeout=120)
557 wait_cmd = ('kubectl logs -n sonobuoy sonobuoy | '
558 'grep "sonobuoy is now blocking"')
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400559
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400560 expected = [0, 1]
561 helpers.wait(
562 lambda: remote.check_call(
563 wait_cmd, expected=expected).exit_code == 0,
564 interval=30, timeout=timeout,
565 timeout_msg="Timeout for CNCF reached."
566 )
567
568 def extract_file_to_node(self, system='docker',
569 container='virtlet',
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400570 file_path='report.xml',
571 out_dir='.',
572 **kwargs):
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400573 """
574 Download file from docker or k8s container to node
575
576 :param system: docker or k8s
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400577 :param container: Full name of part of name
578 :param file_path: File path in container
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400579 :param kwargs: Used to control pod and namespace
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400580 :param out_dir: Output directory
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400581 :return:
582 """
583 with self.__underlay.remote(
584 node_name=self.ctl_host) as remote:
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400585 if system is 'docker':
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400586 cmd = ("docker ps --all | grep \"{0}\" |"
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400587 " awk '{{print $1}}'".format(container))
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400588 result = remote.check_call(cmd, raise_on_err=False)
589 if result['stdout']:
590 container_id = result['stdout'][0].strip()
591 else:
592 LOG.info('No container found, skipping extraction...')
593 return
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400594 cmd = "docker start {}".format(container_id)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400595 remote.check_call(cmd, raise_on_err=False)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400596 cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
597 container_id, file_path, out_dir)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400598 remote.check_call(cmd, raise_on_err=False)
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400599 else:
600 # system is k8s
601 pod_name = kwargs.get('pod_name')
602 pod_namespace = kwargs.get('pod_namespace')
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400603 cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
604 pod_namespace, pod_name, file_path, out_dir)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400605 remote.check_call(cmd, raise_on_err=False)
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400606
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400607 def download_k8s_logs(self, files):
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400608 """
609 Download JUnit report and conformance logs from cluster
610 :param files:
611 :return:
612 """
Victor Ryzhenkin8ff3c3f2018-01-17 19:37:05 +0400613 master_host = self.__config.salt.salt_master_host
614 with self.__underlay.remote(host=master_host) as r:
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400615 for log_file in files:
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400616 cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
617 self.ctl_host, log_file)
Victor Ryzhenkin53b7ad22018-02-08 20:05:40 +0400618 r.check_call(cmd, raise_on_err=False)
619 LOG.info("Downloading the artifact {0}".format(log_file))
620 r.download(destination=log_file, target=os.getcwd())
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400621
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400622 def combine_xunit(self, path, output):
623 """
624 Function to combine multiple xmls with test results to
625 one.
626
627 :param path: Path where xmls to combine located
628 :param output: Path to xml file where output will stored
629 :return:
630 """
631 with self.__underlay.remote(node_name=self.ctl_host) as r:
Tatyana Leontovichfe1834c2018-04-19 13:52:05 +0300632 cmd = ("apt-get install python-setuptools -y; "
Vladimir Jigulinb76d33f2018-05-23 21:04:06 +0400633 "pip install git+https://github.com/mogaika/xunitmerge.git")
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400634 LOG.debug('Installing xunitmerge')
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400635 r.check_call(cmd, raise_on_err=False)
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400636 LOG.debug('Merging xunit')
637 cmd = ("cd {0}; arg = ''; "
638 "for i in $(ls | grep xml); "
639 "do arg=\"$arg $i\"; done && "
640 "xunitmerge $arg {1}".format(path, output))
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400641 r.check_call(cmd, raise_on_err=False)
Victor Ryzhenkincf26c932018-03-29 20:08:21 +0400642
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400643 def manage_cncf_archive(self):
644 """
645 Function to untar archive, move files, that we are needs to the
646 home folder, prepare it to downloading and clean the trash.
647 Will generate files: e2e.log, junit_01.xml, cncf_results.tar.gz
648 and version.txt
649 :return:
650 """
651
652 # Namespace and pod name may be hardcoded since this function is
653 # very specific for cncf and cncf is not going to change
654 # those launch pod name and namespace.
655 get_tar_name_cmd = ("kubectl logs -n sonobuoy sonobuoy | "
656 "grep 'Results available' | "
657 "sed 's/.*\///' | tr -d '\"'")
658
659 with self.__underlay.remote(
660 node_name=self.ctl_host) as remote:
661 tar_name = remote.check_call(get_tar_name_cmd)['stdout'][0].strip()
662 untar = "mkdir result && tar -C result -xzf {0}".format(tar_name)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400663 remote.check_call(untar, raise_on_err=False)
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400664 manage_results = ("mv result/plugins/e2e/results/e2e.log . && "
665 "mv result/plugins/e2e/results/junit_01.xml . ;"
666 "kubectl version > version.txt")
667 remote.check_call(manage_results, raise_on_err=False)
668 cleanup_host = "rm -rf result"
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400669 remote.check_call(cleanup_host, raise_on_err=False)
Victor Ryzhenkin87a31422018-03-16 22:25:27 +0400670 # This one needed to use download fixture, since I don't know
671 # how possible apply fixture arg dynamically from test.
672 rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
Victor Ryzhenkin4e4126c2018-05-22 19:09:20 +0400673 remote.check_call(rename_tar, raise_on_err=False)
Vladimir Jigulin62bcf462018-05-28 18:17:01 +0400674
675 def update_k8s_images(self, tag):
676 """
677 Update k8s images tag version in cluster meta and apply required
678 for update states
679
680 :param tag: New version tag of k8s images
681 :return:
682 """
683 master_host = self.__config.salt.salt_master_host
684
685 def update_image_tag_meta(config, image_name):
686 image_old = config.get(image_name)
687 image_base = image_old.split(':')[0]
688 image_new = "{}:{}".format(image_base, tag)
689 LOG.info("Changing k8s '{0}' image cluster meta to '{1}'".format(
690 image_name, image_new))
691
692 with self.__underlay.remote(host=master_host) as r:
693 cmd = "salt-call reclass.cluster_meta_set" \
694 " name={0} value={1}".format(image_name, image_new)
695 r.check_call(cmd)
696 return image_new
697
698 cfg = self.__config
699
700 update_image_tag_meta(cfg.k8s_deploy, "kubernetes_hyperkube_image")
701 update_image_tag_meta(cfg.k8s_deploy, "kubernetes_pause_image")
702 cfg.k8s.k8s_conformance_image = update_image_tag_meta(
703 cfg.k8s, "k8s_conformance_image")
704
705 steps_path = cfg.k8s_deploy.k8s_update_steps_path
706 update_commands = self.__underlay.read_template(steps_path)
707 self.execute_commands(
708 update_commands, label="Updating kubernetes to '{}'".format(tag))
Vladimir Jigulinee1faa52018-06-25 13:00:51 +0400709
710 def get_keepalived_vip(self):
711 """
712 Return k8s VIP IP address
713
714 :return: str, IP address
715 """
716 ctl_vip_pillar = self._salt.get_pillar(
717 tgt="I@kubernetes:control:enabled:True",
718 pillar="_param:cluster_vip_address")[0]
719 return [vip for minion_id, vip in ctl_vip_pillar.items()][0]