Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 1 | # Copyright 2017 Mirantis, Inc. |
| 2 | # |
| 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 4 | # not use this file except in compliance with the License. You may obtain |
| 5 | # a copy of the License at |
| 6 | # |
| 7 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | # |
| 9 | # Unless required by applicable law or agreed to in writing, software |
| 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
| 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
| 12 | # License for the specific language governing permissions and limitations |
| 13 | # under the License. |
| 14 | |
| 15 | import time |
Victor Ryzhenkin | 3ffa2b4 | 2017-10-05 16:38:44 +0400 | [diff] [blame^] | 16 | from uuid import uuid4 |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 17 | |
| 18 | import yaml |
| 19 | |
| 20 | from devops.helpers import helpers |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 21 | from devops.error import DevopsCalledProcessError |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 22 | |
| 23 | from tcp_tests import logger |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 24 | from tcp_tests.helpers import ext |
| 25 | from tcp_tests.helpers.utils import retry |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 26 | from tcp_tests.managers.execute_commands import ExecuteCommandsMixin |
| 27 | from tcp_tests.managers.k8s import cluster |
Sergey Vasilenko | fd1fd61 | 2017-09-20 13:09:51 +0300 | [diff] [blame] | 28 | from k8sclient.client.rest import ApiException |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 29 | |
# Module-level logger shared by all methods in this manager.
LOG = logger.logger
| 31 | |
| 32 | |
class K8SManager(ExecuteCommandsMixin):
    """Manager for a deployed Kubernetes cluster in tcp-tests.

    Wraps the k8s API client (``self.api``) and remote ``kubectl``/shell
    calls executed on the controller node via the underlay SSH manager.
    """

    # Name-mangled handles to the environment config and underlay SSH
    # manager; assigned in __init__.
    __config = None
    __underlay = None
| 38 | |
    def __init__(self, config, underlay, salt):
        """Store the config/underlay/salt handles and init the mixin.

        :param config: settings object with ``k8s``/``k8s_deploy`` sections
        :param underlay: underlay SSH manager used for remote commands
        :param salt: salt manager used for pillar lookups
        """
        self.__config = config
        self.__underlay = underlay
        self._salt = salt
        # Lazily-built K8sCluster client; see the ``api`` property.
        self._api_client = None
        super(K8SManager, self).__init__(
            config=config, underlay=underlay)
| 46 | |
    def install(self, commands):
        """Run the k8s installation commands and record the API endpoint.

        :param commands: list of command descriptors for execute_commands
        """
        self.execute_commands(commands,
                              label='Install Kubernetes services')
        # Mark the install as finished and remember the proxied API host
        # so the ``api`` property can later build a client against it.
        self.__config.k8s.k8s_installed = True
        self.__config.k8s.kube_host = self.get_proxy_api()
| 52 | |
| 53 | def get_proxy_api(self): |
| 54 | k8s_proxy_ip_pillars = self._salt.get_pillar( |
vrovachev | 99228d3 | 2017-06-08 19:46:10 +0400 | [diff] [blame] | 55 | tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master', |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 56 | pillar='haproxy:proxy:listen:k8s_secure:binds:address') |
vrovachev | 99228d3 | 2017-06-08 19:46:10 +0400 | [diff] [blame] | 57 | k8s_hosts = self._salt.get_pillar( |
| 58 | tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master', |
| 59 | pillar='kubernetes:pool:apiserver:host') |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 60 | k8s_proxy_ip = set([ip |
| 61 | for item in k8s_proxy_ip_pillars |
Dina Belova | e6fdffb | 2017-09-19 13:58:34 -0700 | [diff] [blame] | 62 | for node, ip in item.items() if ip]) |
vrovachev | 99228d3 | 2017-06-08 19:46:10 +0400 | [diff] [blame] | 63 | k8s_hosts = set([ip |
Dina Belova | e6fdffb | 2017-09-19 13:58:34 -0700 | [diff] [blame] | 64 | for item in k8s_hosts |
| 65 | for node, ip in item.items() if ip]) |
vrovachev | 99228d3 | 2017-06-08 19:46:10 +0400 | [diff] [blame] | 66 | assert len(k8s_hosts) == 1, ( |
| 67 | "Found more than one Kubernetes API hosts in pillars:{0}, " |
| 68 | "expected one!").format(k8s_hosts) |
| 69 | k8s_host = k8s_hosts.pop() |
| 70 | assert k8s_host in k8s_proxy_ip, ( |
| 71 | "Kubernetes API host:{0} not found in proxies:{} " |
| 72 | "on k8s master nodes. K8s proxies are expected on " |
| 73 | "nodes with K8s master").format(k8s_host, k8s_proxy_ip) |
| 74 | return k8s_host |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 75 | |
| 76 | @property |
| 77 | def api(self): |
| 78 | if self._api_client is None: |
| 79 | self._api_client = cluster.K8sCluster( |
| 80 | user=self.__config.k8s_deploy.kubernetes_admin_user, |
| 81 | password=self.__config.k8s_deploy.kubernetes_admin_password, |
| 82 | host=self.__config.k8s.kube_host, |
| 83 | port=self.__config.k8s.kube_apiserver_port, |
| 84 | default_namespace='default') |
| 85 | return self._api_client |
| 86 | |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 87 | @property |
| 88 | def ctl_host(self): |
| 89 | nodes = [node for node in self.__config.underlay.ssh if |
| 90 | ext.UNDERLAY_NODE_ROLES.k8s_controller in node['roles']] |
| 91 | return nodes[0]['node_name'] |
| 92 | |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 93 | def get_pod_phase(self, pod_name, namespace=None): |
| 94 | return self.api.pods.get( |
| 95 | name=pod_name, namespace=namespace).phase |
| 96 | |
| 97 | def wait_pod_phase(self, pod_name, phase, namespace=None, timeout=60): |
| 98 | """Wait phase of pod_name from namespace while timeout |
| 99 | |
| 100 | :param str: pod_name |
| 101 | :param str: namespace |
| 102 | :param list or str: phase |
| 103 | :param int: timeout |
| 104 | |
| 105 | :rtype: None |
| 106 | """ |
| 107 | if isinstance(phase, str): |
| 108 | phase = [phase] |
| 109 | |
| 110 | def check(): |
| 111 | return self.get_pod_phase(pod_name, namespace) in phase |
| 112 | |
| 113 | helpers.wait(check, timeout=timeout, |
| 114 | timeout_msg='Timeout waiting, pod {pod_name} is not in ' |
| 115 | '"{phase}" phase'.format( |
| 116 | pod_name=pod_name, phase=phase)) |
| 117 | |
| 118 | def wait_pods_phase(self, pods, phase, timeout=60): |
| 119 | """Wait timeout seconds for phase of pods |
| 120 | |
| 121 | :param pods: list of K8sPod |
| 122 | :param phase: list or str |
| 123 | :param timeout: int |
| 124 | |
| 125 | :rtype: None |
| 126 | """ |
| 127 | if isinstance(phase, str): |
| 128 | phase = [phase] |
| 129 | |
| 130 | def check(pod_name, namespace): |
| 131 | return self.get_pod_phase(pod_name, namespace) in phase |
| 132 | |
| 133 | def check_all_pods(): |
| 134 | return all(check(pod.name, pod.metadata.namespace) for pod in pods) |
| 135 | |
| 136 | helpers.wait( |
| 137 | check_all_pods, |
| 138 | timeout=timeout, |
| 139 | timeout_msg='Timeout waiting, pods {0} are not in "{1}" ' |
| 140 | 'phase'.format([pod.name for pod in pods], phase)) |
| 141 | |
| 142 | def check_pod_create(self, body, namespace=None, timeout=300, interval=5): |
| 143 | """Check creating sample pod |
| 144 | |
| 145 | :param k8s_pod: V1Pod |
| 146 | :param namespace: str |
| 147 | :rtype: V1Pod |
| 148 | """ |
| 149 | LOG.info("Creating pod in k8s cluster") |
| 150 | LOG.debug( |
| 151 | "POD spec to create:\n{}".format( |
| 152 | yaml.dump(body, default_flow_style=False)) |
| 153 | ) |
| 154 | LOG.debug("Timeout for creation is set to {}".format(timeout)) |
| 155 | LOG.debug("Checking interval is set to {}".format(interval)) |
| 156 | pod = self.api.pods.create(body=body, namespace=namespace) |
| 157 | pod.wait_running(timeout=300, interval=5) |
| 158 | LOG.info("Pod '{0}' is created in '{1}' namespace".format( |
| 159 | pod.name, pod.namespace)) |
| 160 | return self.api.pods.get(name=pod.name, namespace=pod.namespace) |
| 161 | |
| 162 | def wait_pod_deleted(self, podname, timeout=60, interval=5): |
| 163 | helpers.wait( |
| 164 | lambda: podname not in [pod.name for pod in self.api.pods.list()], |
| 165 | timeout=timeout, |
| 166 | interval=interval, |
| 167 | timeout_msg="Pod deletion timeout reached!" |
| 168 | ) |
| 169 | |
| 170 | def check_pod_delete(self, k8s_pod, timeout=300, interval=5, |
| 171 | namespace=None): |
| 172 | """Deleting pod from k8s |
| 173 | |
| 174 | :param k8s_pod: tcp_tests.managers.k8s.nodes.K8sNode |
| 175 | :param k8sclient: tcp_tests.managers.k8s.cluster.K8sCluster |
| 176 | """ |
| 177 | LOG.info("Deleting pod '{}'".format(k8s_pod.name)) |
| 178 | LOG.debug("Pod status:\n{}".format(k8s_pod.status)) |
| 179 | LOG.debug("Timeout for deletion is set to {}".format(timeout)) |
| 180 | LOG.debug("Checking interval is set to {}".format(interval)) |
| 181 | self.api.pods.delete(body=k8s_pod, name=k8s_pod.name, |
| 182 | namespace=namespace) |
| 183 | self.wait_pod_deleted(k8s_pod.name, timeout, interval) |
| 184 | LOG.debug("Pod '{}' is deleted".format(k8s_pod.name)) |
| 185 | |
| 186 | def check_service_create(self, body, namespace=None): |
| 187 | """Check creating k8s service |
| 188 | |
| 189 | :param body: dict, service spec |
| 190 | :param namespace: str |
| 191 | :rtype: K8sService object |
| 192 | """ |
| 193 | LOG.info("Creating service in k8s cluster") |
| 194 | LOG.debug( |
| 195 | "Service spec to create:\n{}".format( |
| 196 | yaml.dump(body, default_flow_style=False)) |
| 197 | ) |
| 198 | service = self.api.services.create(body=body, namespace=namespace) |
| 199 | LOG.info("Service '{0}' is created in '{1}' namespace".format( |
| 200 | service.name, service.namespace)) |
| 201 | return self.api.services.get(name=service.name, |
| 202 | namespace=service.namespace) |
| 203 | |
| 204 | def check_ds_create(self, body, namespace=None): |
| 205 | """Check creating k8s DaemonSet |
| 206 | |
| 207 | :param body: dict, DaemonSet spec |
| 208 | :param namespace: str |
| 209 | :rtype: K8sDaemonSet object |
| 210 | """ |
| 211 | LOG.info("Creating DaemonSet in k8s cluster") |
| 212 | LOG.debug( |
| 213 | "DaemonSet spec to create:\n{}".format( |
| 214 | yaml.dump(body, default_flow_style=False)) |
| 215 | ) |
| 216 | ds = self.api.daemonsets.create(body=body, namespace=namespace) |
| 217 | LOG.info("DaemonSet '{0}' is created in '{1}' namespace".format( |
| 218 | ds.name, ds.namespace)) |
| 219 | return self.api.daemonsets.get(name=ds.name, namespace=ds.namespace) |
| 220 | |
| 221 | def check_ds_ready(self, dsname, namespace=None): |
| 222 | """Check if k8s DaemonSet is ready |
| 223 | |
| 224 | :param dsname: str, ds name |
| 225 | :return: bool |
| 226 | """ |
| 227 | ds = self.api.daemonsets.get(name=dsname, namespace=namespace) |
| 228 | return (ds.status.current_number_scheduled == |
| 229 | ds.status.desired_number_scheduled) |
| 230 | |
| 231 | def wait_ds_ready(self, dsname, namespace=None, timeout=60, interval=5): |
| 232 | """Wait until all pods are scheduled on nodes |
| 233 | |
| 234 | :param dsname: str, ds name |
| 235 | :param timeout: int |
| 236 | :param interval: int |
| 237 | """ |
| 238 | helpers.wait( |
| 239 | lambda: self.check_ds_ready(dsname, namespace=namespace), |
| 240 | timeout=timeout, interval=interval) |
| 241 | |
Artem Panchenko | 501e67e | 2017-06-14 14:59:18 +0300 | [diff] [blame] | 242 | def check_deploy_create(self, body, namespace=None): |
| 243 | """Check creating k8s Deployment |
| 244 | |
| 245 | :param body: dict, Deployment spec |
| 246 | :param namespace: str |
| 247 | :rtype: K8sDeployment object |
| 248 | """ |
| 249 | LOG.info("Creating Deployment in k8s cluster") |
| 250 | LOG.debug( |
| 251 | "Deployment spec to create:\n{}".format( |
| 252 | yaml.dump(body, default_flow_style=False)) |
| 253 | ) |
| 254 | deploy = self.api.deployments.create(body=body, namespace=namespace) |
| 255 | LOG.info("Deployment '{0}' is created in '{1}' namespace".format( |
| 256 | deploy.name, deploy.namespace)) |
| 257 | return self.api.deployments.get(name=deploy.name, |
| 258 | namespace=deploy.namespace) |
| 259 | |
| 260 | def check_deploy_ready(self, deploy_name, namespace=None): |
| 261 | """Check if k8s Deployment is ready |
| 262 | |
| 263 | :param deploy_name: str, deploy name |
| 264 | :return: bool |
| 265 | """ |
Dina Belova | e6fdffb | 2017-09-19 13:58:34 -0700 | [diff] [blame] | 266 | deploy = self.api.deployments.get(name=deploy_name, |
| 267 | namespace=namespace) |
Artem Panchenko | 501e67e | 2017-06-14 14:59:18 +0300 | [diff] [blame] | 268 | return deploy.status.available_replicas == deploy.status.replicas |
| 269 | |
Dina Belova | e6fdffb | 2017-09-19 13:58:34 -0700 | [diff] [blame] | 270 | def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60, |
| 271 | interval=5): |
Artem Panchenko | 501e67e | 2017-06-14 14:59:18 +0300 | [diff] [blame] | 272 | """Wait until all pods are scheduled on nodes |
| 273 | |
| 274 | :param deploy_name: str, deploy name |
| 275 | :param timeout: int |
| 276 | :param interval: int |
| 277 | """ |
| 278 | helpers.wait( |
| 279 | lambda: self.check_deploy_ready(deploy_name, namespace=namespace), |
| 280 | timeout=timeout, interval=interval) |
| 281 | |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 282 | def check_namespace_create(self, name): |
| 283 | """Check creating k8s Namespace |
| 284 | |
| 285 | :param name: str |
| 286 | :rtype: K8sNamespace object |
| 287 | """ |
Sergey Vasilenko | fd1fd61 | 2017-09-20 13:09:51 +0300 | [diff] [blame] | 288 | try: |
| 289 | ns = self.api.namespaces.get(name=name) |
| 290 | LOG.info("Namespace '{0}' is already exists".format(ns.name)) |
| 291 | except ApiException as e: |
| 292 | if hasattr(e,"status") and 404 == e.status: |
| 293 | LOG.info("Creating Namespace in k8s cluster") |
| 294 | ns = self.api.namespaces.create(body={'metadata': {'name': name}}) |
| 295 | LOG.info("Namespace '{0}' is created".format(ns.name)) |
| 296 | # wait 10 seconds until a token for new service account is created |
| 297 | time.sleep(10) |
| 298 | ns = self.api.namespaces.get(name=ns.name) |
| 299 | else: |
| 300 | raise |
| 301 | return ns |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 302 | |
| 303 | def create_objects(self, path): |
| 304 | if isinstance(path, str): |
| 305 | path = [path] |
| 306 | params = ' '.join(["-f {}".format(p) for p in path]) |
| 307 | cmd = 'kubectl create {params}'.format(params=params) |
| 308 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 309 | node_name=self.ctl_host) as remote: |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 310 | LOG.info("Running command '{cmd}' on node {node}".format( |
| 311 | cmd=cmd, |
| 312 | node=remote.hostname) |
| 313 | ) |
| 314 | result = remote.check_call(cmd) |
| 315 | LOG.info(result['stdout']) |
| 316 | |
| 317 | def get_running_pods(self, pod_name, namespace=None): |
| 318 | pods = [pod for pod in self.api.pods.list(namespace=namespace) |
| 319 | if (pod_name in pod.name and pod.status.phase == 'Running')] |
| 320 | return pods |
| 321 | |
| 322 | def get_pods_number(self, pod_name, namespace=None): |
| 323 | pods = self.get_running_pods(pod_name, namespace) |
| 324 | return len(pods) |
| 325 | |
| 326 | def get_running_pods_by_ssh(self, pod_name, namespace=None): |
| 327 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 328 | node_name=self.ctl_host) as remote: |
Artem Panchenko | 0594cd7 | 2017-06-12 13:25:26 +0300 | [diff] [blame] | 329 | result = remote.check_call("kubectl get pods --namespace {} |" |
| 330 | " grep {} | awk '{{print $1 \" \"" |
| 331 | " $3}}'".format(namespace, |
| 332 | pod_name))['stdout'] |
| 333 | running_pods = [data.strip().split()[0] for data in result |
| 334 | if data.strip().split()[1] == 'Running'] |
| 335 | return running_pods |
| 336 | |
| 337 | def get_pods_restarts(self, pod_name, namespace=None): |
| 338 | pods = [pod.status.container_statuses[0].restart_count |
| 339 | for pod in self.get_running_pods(pod_name, namespace)] |
| 340 | return sum(pods) |
vrovachev | a9d0833 | 2017-06-22 20:01:59 +0400 | [diff] [blame] | 341 | |
| 342 | def run_conformance(self, timeout=60 * 60): |
| 343 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 344 | node_name=self.ctl_host) as remote: |
vrovachev | a9d0833 | 2017-06-22 20:01:59 +0400 | [diff] [blame] | 345 | result = remote.check_call( |
| 346 | "docker run --rm --net=host -e API_SERVER=" |
| 347 | "'http://127.0.0.1:8080' {}".format( |
| 348 | self.__config.k8s.k8s_conformance_image), |
| 349 | timeout=timeout)['stdout'] |
| 350 | return result |
Artem Panchenko | 501e67e | 2017-06-14 14:59:18 +0300 | [diff] [blame] | 351 | |
| 352 | def get_k8s_masters(self): |
| 353 | k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master', |
| 354 | pillar='linux:network:fqdn') |
| 355 | return [self._K8SManager__underlay.host_by_node_name(node_name=v) |
| 356 | for pillar in k8s_masters_fqdn for k, v in pillar.items()] |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 357 | |
| 358 | def kubectl_run(self, name, image, port): |
| 359 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 360 | node_name=self.ctl_host) as remote: |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 361 | result = remote.check_call( |
| 362 | "kubectl run {0} --image={1} --port={2}".format( |
| 363 | name, image, port |
| 364 | ) |
| 365 | ) |
| 366 | return result |
| 367 | |
| 368 | def kubectl_expose(self, resource, name, port, type): |
| 369 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 370 | node_name=self.ctl_host) as remote: |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 371 | result = remote.check_call( |
| 372 | "kubectl expose {0} {1} --port={2} --type={3}".format( |
| 373 | resource, name, port, type |
| 374 | ) |
| 375 | ) |
| 376 | return result |
| 377 | |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 378 | def kubectl_annotate(self, resource, name, annotation): |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 379 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 380 | node_name=self.ctl_host) as remote: |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 381 | result = remote.check_call( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 382 | "kubectl annotate {0} {1} {2}".format( |
| 383 | resource, name, annotation |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 384 | ) |
| 385 | ) |
| 386 | return result |
| 387 | |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 388 | def get_svc_ip(self, name, namespace='kube-system'): |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 389 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 390 | node_name=self.ctl_host) as remote: |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 391 | result = remote.check_call( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 392 | "kubectl get svc {0} -n {1} | " |
| 393 | "awk '{{print $2}}' | tail -1".format(name, namespace) |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 394 | ) |
| 395 | return result['stdout'][0].strip() |
| 396 | |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 397 | @retry(300, exception=DevopsCalledProcessError) |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 398 | def nslookup(self, host, src): |
| 399 | with self.__underlay.remote( |
Victor Ryzhenkin | 66d3937 | 2017-09-28 19:25:48 +0400 | [diff] [blame] | 400 | node_name=self.ctl_host) as remote: |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 401 | remote.check_call("nslookup {0} {1}".format(host, src)) |
| 402 | |
Victor Ryzhenkin | 3ffa2b4 | 2017-10-05 16:38:44 +0400 | [diff] [blame^] | 403 | # ---------------------------- Virtlet methods ------------------------------- |
    def install_jq(self):
        """Install jq (the command-line JSON processor) on the ctl node.

        Required for changing yamls on the fly.
        NOTE(review): the original docstring said "JQuery"; the package
        installed here is ``jq``, the JSON filter tool.

        :return: check_call result dict
        """
        cmd = "apt install jq -y"
        return self.__underlay.check_call(cmd, node_name=self.ctl_host)
| 411 | |
| 412 | |
| 413 | def git_clone(self, project, target): |
| 414 | cmd = "git clone {0} {1}".format(project, target) |
| 415 | return self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 416 | |
| 417 | def run_vm(self, name=None, yaml_path='~/virtlet/examples/cirros-vm.yaml'): |
| 418 | if not name: |
| 419 | name = 'virtlet-vm-{}'.format(uuid4()) |
| 420 | cmd = ( |
| 421 | "kubectl convert -f {0} --local " |
| 422 | "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -") |
| 423 | self.__underlay.check_call(cmd.format(yaml_path, name), |
| 424 | node_name=self.ctl_host) |
| 425 | return name |
| 426 | |
| 427 | def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None): |
| 428 | cmd = "kubectl get po {} -n default".format(name) |
| 429 | if jsonpath: |
| 430 | cmd += " -o jsonpath={}".format(jsonpath) |
| 431 | return self.__underlay.check_call( |
| 432 | cmd, node_name=self.ctl_host, expected=expected) |
| 433 | |
| 434 | def wait_active_state(self, name, timeout=180): |
| 435 | helpers.wait( |
| 436 | lambda: self.get_vm_info(name)['stdout'][0] == 'Running', |
| 437 | timeout=timeout, |
| 438 | timeout_msg="VM {} didn't Running state in {} sec. " |
| 439 | "Current state: ".format( |
| 440 | name, timeout, self.get_vm_info(name)['stdout'][0])) |
| 441 | |
| 442 | def delete_vm(self, name, timeout=180): |
| 443 | cmd = "kubectl delete po -n default {}".format(name) |
| 444 | self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 445 | |
| 446 | helpers.wait( |
| 447 | lambda: |
| 448 | "Error from server (NotFound):" in |
| 449 | " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']), |
| 450 | timeout=timeout, |
| 451 | timeout_msg="VM {} didn't Running state in {} sec. " |
| 452 | "Current state: ".format( |
| 453 | name, timeout, self.get_vm_info(name)['stdout'][0])) |
| 454 | |
| 455 | def adjust_cirros_resources( |
| 456 | self, cpu=2, memory='256', |
| 457 | target_yaml='virtlet/examples/cirros-vm-exp.yaml'): |
| 458 | # We will need to change params in case of example change |
| 459 | cmd = ("cd ~/virtlet/examples && " |
| 460 | "cp cirros-vm.yaml {2} && " |
| 461 | "sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/ " |
| 462 | "\1VirtletVCPUCount: \"{0}\"/' {2} && " |
| 463 | "sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: " |
| 464 | "{1}Mi/' {2}".format(cpu, memory, target_yaml)) |
| 465 | self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 466 | |
| 467 | def get_domain_name(self, vm_name): |
| 468 | cmd = ("~/virtlet/examples/virsh.sh list --name | " |
| 469 | "grep -i {0} ".format(vm_name)) |
| 470 | result = self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 471 | return result['stdout'].strip() |
| 472 | |
| 473 | def get_vm_cpu_count(self, domain_name): |
| 474 | cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | " |
| 475 | "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name)) |
| 476 | result = self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 477 | return int(result['stdout'].strip()) |
| 478 | |
| 479 | def get_vm_memory_count(self, domain_name): |
| 480 | cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | " |
| 481 | "grep 'memory unit' | " |
| 482 | "grep -o '[[:digit:]]*'".format(domain_name)) |
| 483 | result = self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 484 | return int(result['stdout'].strip()) |
| 485 | |
| 486 | def get_domain_id(self, domain_name): |
| 487 | cmd = ("virsh dumpxml {} | grep id=\' | " |
| 488 | "grep -o [[:digit:]]*".format(domain_name)) |
| 489 | result = self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 490 | return int(result['stdout'].strip()) |
| 491 | |
| 492 | def list_vm_volumes(self, domain_name): |
| 493 | domain_id = self.get_domain_id(domain_name) |
| 494 | cmd = ("~/virtlet/examples/virsh.sh domblklist {} | " |
| 495 | "tail -n +3 | awk {{'print $2'}}".format(domain_id)) |
| 496 | result = self.__underlay.check_call(cmd, node_name=self.ctl_host) |
| 497 | return result['stdout'].strip() |