Dennis Dmitriev | f5f2e60 | 2017-11-03 15:36:19 +0200 | [diff] [blame] | 1 | # Copyright 2019 Mirantis, Inc. |
| 2 | # |
| 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 4 | # not use this file except in compliance with the License. You may obtain |
| 5 | # a copy of the License at |
| 6 | # |
| 7 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | # |
| 9 | # Unless required by applicable law or agreed to in writing, software |
| 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
| 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
| 12 | # License for the specific language governing permissions and limitations |
| 13 | # under the License. |
| 14 | |
| 15 | import os |
| 16 | import netaddr |
| 17 | import yaml |
| 18 | |
| 19 | from devops.helpers import helpers |
| 20 | from devops.helpers.helpers import ssh_client |
| 21 | from retry import retry |
| 22 | |
| 23 | from cached_property import cached_property |
| 24 | |
| 25 | from heatclient import client as heatclient |
| 26 | from heatclient import exc as heat_exceptions |
| 27 | from heatclient.common import template_utils |
| 28 | from keystoneauth1.identity import v3 as keystone_v3 |
| 29 | from keystoneauth1 import session as keystone_session |
| 30 | |
| 31 | import requests |
| 32 | from requests.packages.urllib3.exceptions import InsecureRequestWarning |
| 33 | |
| 34 | from oslo_config import cfg |
| 35 | from paramiko.ssh_exception import ( |
| 36 | AuthenticationException, |
| 37 | BadAuthenticationType) |
| 38 | |
| 39 | from tcp_tests import settings |
| 40 | from tcp_tests import settings_oslo |
| 41 | from tcp_tests.helpers import exceptions |
| 42 | from tcp_tests import logger |
| 43 | |
# Module-wide logger shared by this environment manager
LOG = logger.logger

# Heat stack status that indicates a successfully created environment
EXPECTED_STACK_STATUS = "CREATE_COMPLETE"
# Heat stack statuses that are treated as unrecoverable failures
BAD_STACK_STATUSES = ["CREATE_FAILED"]

# Disable multiple notifications like:
# "InsecureRequestWarning: Unverified HTTPS request is being made."
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
| 52 | |
| 53 | |
class EnvironmentManagerHeat(object):
    """Class-helper for managing the test environment VMs via a Heat stack"""

    # oslo.config object with the environment settings; set in __init__
    __config = None

    # Cached heat client instance.
    # Do not use self.__heatclient directly! Use properties
    # for necessary resources with catching HTTPUnauthorized exception
    __heatclient = None
| 62 | |
| 63 | def __init__(self, config=None): |
| 64 | """Create/connect to the Heat stack with test environment |
| 65 | |
| 66 | :param config: oslo.config object |
| 67 | :param config.hardware.heat_version: Heat version |
| 68 | :param config.hardware.os_auth_url: OS auth URL to access heat |
| 69 | :param config.hardware.os_username: OS username |
| 70 | :param config.hardware.os_password: OS password |
| 71 | :param config.hardware.os_project_name: OS tenant name |
| 72 | """ |
| 73 | self.__config = config |
| 74 | |
| 75 | if not self.__config.hardware.heat_stack_name: |
| 76 | self.__config.hardware.heat_stack_name = settings.ENV_NAME |
| 77 | |
| 78 | self.__init_heatclient() |
| 79 | |
| 80 | try: |
| 81 | stack_status = self._current_stack.stack_status |
| 82 | if stack_status != EXPECTED_STACK_STATUS: |
| 83 | raise exceptions.EnvironmentWrongStatus( |
| 84 | self.__config.hardware.heat_stack_name, |
| 85 | EXPECTED_STACK_STATUS, |
| 86 | stack_status |
| 87 | ) |
| 88 | LOG.info("Heat stack '{0}' already exists".format( |
| 89 | self.__config.hardware.heat_stack_name)) |
| 90 | except heat_exceptions.HTTPNotFound: |
| 91 | self._create_environment() |
| 92 | LOG.info("Heat stack '{0}' created".format( |
| 93 | self.__config.hardware.heat_stack_name)) |
| 94 | |
| 95 | self.set_address_pools_config() |
| 96 | self.set_dhcp_ranges_config() |
| 97 | |
| 98 | @cached_property |
| 99 | def _keystone_session(self): |
| 100 | keystone_auth = keystone_v3.Password( |
| 101 | auth_url=settings.OS_AUTH_URL, |
| 102 | username=settings.OS_USERNAME, |
| 103 | password=settings.OS_PASSWORD, |
| 104 | project_name=settings.OS_PROJECT_NAME, |
Dennis Dmitriev | c902ad8 | 2019-04-12 13:41:30 +0300 | [diff] [blame^] | 105 | user_domain_name=settings.OS_USER_DOMAIN_NAME, |
Dennis Dmitriev | f5f2e60 | 2017-11-03 15:36:19 +0200 | [diff] [blame] | 106 | project_domain_name='Default') |
| 107 | return keystone_session.Session(auth=keystone_auth, verify=False) |
| 108 | |
| 109 | def __init_heatclient(self): |
| 110 | token = self._keystone_session.get_token() |
| 111 | endpoint_url = self._keystone_session.get_endpoint( |
| 112 | service_type='orchestration', endpoint_type='publicURL') |
| 113 | self.__heatclient = heatclient.Client( |
| 114 | version=settings.OS_HEAT_VERSION, endpoint=endpoint_url, |
| 115 | token=token, insecure=True) |
| 116 | |
| 117 | @property |
| 118 | def _current_stack(self): |
| 119 | return self.__stacks.get( |
| 120 | self.__config.hardware.heat_stack_name) |
| 121 | |
| 122 | @property |
| 123 | def __stacks(self): |
| 124 | try: |
| 125 | return self.__heatclient.stacks |
| 126 | except heat_exceptions.HTTPUnauthorized: |
| 127 | LOG.warning("Authorization token outdated, refreshing") |
| 128 | self.__init_heatclient() |
| 129 | return self.__heatclient.stacks |
| 130 | |
| 131 | @property |
| 132 | def __resources(self): |
| 133 | try: |
| 134 | return self.__heatclient.resources |
| 135 | except heat_exceptions.HTTPUnauthorized: |
| 136 | LOG.warning("Authorization token outdated, refreshing") |
| 137 | self.__init_heatclient() |
| 138 | return self.__heatclient.resources |
| 139 | |
| 140 | def _get_resources_by_type(self, resource_type): |
| 141 | res = [] |
| 142 | for item in self.__resources.list( |
| 143 | self.__config.hardware.heat_stack_name): |
| 144 | if item.resource_type == resource_type: |
| 145 | resource = self.__resources.get( |
| 146 | self.__config.hardware.heat_stack_name, |
| 147 | item.resource_name) |
| 148 | res.append(resource) |
| 149 | return res |
| 150 | |
| 151 | @cached_property |
| 152 | def _nodes(self): |
| 153 | """Get list of nodenames from heat |
| 154 | |
| 155 | Returns list of dicts. |
| 156 | Example: |
| 157 | - name: cfg01 |
| 158 | roles: |
| 159 | - salt_master |
| 160 | addresses: # Optional. May be an empty dict |
| 161 | admin-pool01: p.p.p.202 |
| 162 | - name: ctl01 |
| 163 | roles: |
| 164 | - salt_minion |
| 165 | - openstack_controller |
| 166 | - openstack_messaging |
| 167 | - openstack_database |
| 168 | addresses: {} # Optional. May be an empty dict |
| 169 | |
| 170 | 'name': taken from heat template resource's ['name'] parameter |
| 171 | 'roles': a list taken from resource's ['metadata']['roles'] parameter |
| 172 | """ |
| 173 | address_pools = self._address_pools |
| 174 | nodes = [] |
| 175 | for heat_node in self._get_resources_by_type("OS::Nova::Server"): |
| 176 | # addresses will have the following dict structure: |
| 177 | # {'admin-pool01': <floating_ip1>, |
| 178 | # 'private-pool01': <floating_ip2>, |
| 179 | # 'external-pool01': <floating_ip3> |
| 180 | # } |
| 181 | # , where key is one of roles from OS::Neutron::Subnet, |
| 182 | # and value is a floating IP associated to the fixed IP |
| 183 | # in this subnet (if exists). |
| 184 | # If no floating IPs associated to the server, |
| 185 | # then addresses will be an empty list. |
| 186 | addresses = {} |
| 187 | for network in heat_node.attributes['addresses']: |
| 188 | fixed = None |
| 189 | floating = None |
| 190 | for address in heat_node.attributes['addresses'][network]: |
| 191 | addr_type = address['OS-EXT-IPS:type'] |
| 192 | if addr_type == 'fixed': |
| 193 | fixed = address['addr'] |
| 194 | elif addr_type == 'floating': |
| 195 | floating = address['addr'] |
| 196 | else: |
| 197 | LOG.error("Unexpected OS-EXT-IPS:type={0} " |
| 198 | "in node '{1}' for network '{2}'" |
| 199 | .format(addr_type, |
| 200 | heat_node.attributes['name'], |
| 201 | network)) |
| 202 | if fixed is None or floating is None: |
| 203 | LOG.error("Unable to determine the correct IP address " |
| 204 | "in node '{0}' for network '{1}'" |
| 205 | .format(heat_node.attributes['name'], network)) |
| 206 | continue |
| 207 | # Check which address pool has the fixed address, and set |
| 208 | # the floating address as the access to this address pool. |
| 209 | for address_pool in address_pools: |
| 210 | pool_net = netaddr.IPNetwork(address_pool['cidr']) |
| 211 | if fixed in pool_net: |
| 212 | for role in address_pool['roles']: |
Dennis Dmitriev | c902ad8 | 2019-04-12 13:41:30 +0300 | [diff] [blame^] | 213 | # addresses[role] = floating |
| 214 | # Use fixed addresses for SSH access |
| 215 | addresses[role] = fixed |
Dennis Dmitriev | f5f2e60 | 2017-11-03 15:36:19 +0200 | [diff] [blame] | 216 | |
| 217 | nodes.append({ |
| 218 | 'name': heat_node.attributes['name'], |
| 219 | 'roles': yaml.load(heat_node.attributes['metadata']['roles']), |
| 220 | 'addresses': addresses, |
| 221 | }) |
| 222 | return nodes |
| 223 | |
| 224 | @cached_property |
| 225 | def _address_pools(self): |
| 226 | """Get address pools from subnets OS::Neutron::Subnet |
| 227 | |
| 228 | Returns list of dicts. |
| 229 | Example: |
| 230 | - roles: |
| 231 | - admin-pool01 |
| 232 | cidr: x.x.x.x/y |
| 233 | start: x.x.x.2 |
| 234 | end: x.x.x.254 |
| 235 | gateway: x.x.x.1 # or None |
| 236 | """ |
| 237 | pools = [] |
| 238 | for heat_subnet in self._get_resources_by_type("OS::Neutron::Subnet"): |
| 239 | pools.append({ |
| 240 | 'roles': heat_subnet.attributes['tags'], |
| 241 | 'cidr': heat_subnet.attributes['cidr'], |
| 242 | 'gateway': heat_subnet.attributes['gateway_ip'], |
| 243 | 'start': heat_subnet.attributes[ |
| 244 | 'allocation_pools'][0]['start'], |
| 245 | 'end': heat_subnet.attributes['allocation_pools'][0]['end'], |
| 246 | }) |
| 247 | return pools |
| 248 | |
| 249 | def _get_nodes_by_roles(self, roles=None): |
| 250 | nodes = [] |
| 251 | if roles is None: |
| 252 | return self._nodes |
| 253 | |
| 254 | for node in self._nodes: |
| 255 | if set(node['roles']).intersection(set(roles)): |
| 256 | nodes.append(node) |
| 257 | return nodes |
| 258 | |
| 259 | def get_ssh_data(self, roles=None): |
| 260 | """Generate ssh config for Underlay |
| 261 | |
| 262 | :param roles: list of strings |
| 263 | """ |
| 264 | if roles is None: |
| 265 | raise Exception("No roles specified for the environment!") |
| 266 | |
| 267 | config_ssh = [] |
| 268 | for d_node in self._get_nodes_by_roles(roles=roles): |
| 269 | for pool_name in d_node['addresses']: |
| 270 | ssh_data = { |
| 271 | 'node_name': d_node['name'], |
| 272 | 'minion_id': d_node['name'], |
| 273 | 'roles': d_node['roles'], |
| 274 | 'address_pool': pool_name, |
| 275 | 'host': d_node['addresses'][pool_name], |
| 276 | 'login': settings.SSH_NODE_CREDENTIALS['login'], |
| 277 | 'password': settings.SSH_NODE_CREDENTIALS['password'], |
| 278 | 'keys': [k['private'] |
| 279 | for k in self.__config.underlay.ssh_keys] |
| 280 | } |
| 281 | config_ssh.append(ssh_data) |
| 282 | return config_ssh |
| 283 | |
| 284 | def _get_resources_with_wrong_status(self): |
| 285 | res = [] |
| 286 | for item in self.__resources.list( |
| 287 | self.__config.hardware.heat_stack_name): |
| 288 | if item.resource_status in BAD_STACK_STATUSES: |
| 289 | res.append({ |
| 290 | 'resource_name': item.resource_name, |
| 291 | 'resource_status': item.resource_status, |
| 292 | 'resource_status_reason': item.resource_status_reason, |
| 293 | 'resource_type': item.resource_type |
| 294 | }) |
| 295 | wrong_resources = '\n'.join([ |
| 296 | "*** Heat stack resource '{0}' ({1}) has wrong status '{2}': {3}" |
| 297 | .format(item['resource_name'], |
| 298 | item['resource_type'], |
| 299 | item['resource_status'], |
| 300 | item['resource_status_reason']) |
| 301 | for item in res |
| 302 | ]) |
| 303 | return wrong_resources |
| 304 | |
| 305 | def wait_of_stack_status(self, status, delay=30, tries=60): |
| 306 | |
| 307 | @retry(exceptions.EnvironmentWrongStatus, delay=delay, tries=tries) |
| 308 | def wait(): |
| 309 | st = self._current_stack.stack_status |
| 310 | if st == status: |
| 311 | return |
| 312 | elif st in BAD_STACK_STATUSES: |
| 313 | wrong_resources = self._get_resources_with_wrong_status() |
| 314 | raise exceptions.EnvironmentBadStatus( |
| 315 | self.__config.hardware.heat_stack_name, |
| 316 | status, |
| 317 | st, |
| 318 | wrong_resources |
| 319 | ) |
| 320 | else: |
| 321 | LOG.info("Stack {0} status: {1}".format( |
| 322 | self.__config.hardware.heat_stack_name, st)) |
| 323 | raise exceptions.EnvironmentWrongStatus( |
| 324 | self.__config.hardware.heat_stack_name, |
| 325 | status, |
| 326 | st |
| 327 | ) |
| 328 | LOG.info("Waiting for stack '{0}' status <{1}>".format( |
| 329 | self.__config.hardware.heat_stack_name, status)) |
| 330 | wait() |
| 331 | |
| 332 | def revert_snapshot(self, name): |
| 333 | """Revert snapshot by name |
| 334 | |
| 335 | - Revert the heat snapshot in the environment |
| 336 | - Try to reload 'config' object from a file 'config_<name>.ini' |
| 337 | If the file not found, then pass with defaults. |
| 338 | - Set <name> as the current state of the environment after reload |
| 339 | |
| 340 | :param name: string |
| 341 | """ |
| 342 | LOG.info("Reading INI config (without reverting env to snapshot) " |
| 343 | "named '{0}'".format(name)) |
| 344 | |
| 345 | try: |
| 346 | test_config_path = self._get_snapshot_config_name(name) |
| 347 | settings_oslo.reload_snapshot_config(self.__config, |
| 348 | test_config_path) |
| 349 | except cfg.ConfigFilesNotFoundError as conf_err: |
| 350 | LOG.error("Config file(s) {0} not found!".format( |
| 351 | conf_err.config_files)) |
| 352 | |
| 353 | self.__config.hardware.current_snapshot = name |
| 354 | |
| 355 | def create_snapshot(self, name, *args, **kwargs): |
| 356 | """Create named snapshot of current env. |
| 357 | |
| 358 | - Create a snapshot for the environment |
| 359 | - Save 'config' object to a file 'config_<name>.ini' |
| 360 | |
| 361 | :name: string |
| 362 | """ |
| 363 | LOG.info("Store INI config (without env snapshot) named '{0}'" |
| 364 | .format(name)) |
| 365 | self.__config.hardware.current_snapshot = name |
| 366 | settings_oslo.save_config(self.__config, |
| 367 | name, |
| 368 | self.__config.hardware.heat_stack_name) |
| 369 | |
| 370 | def _get_snapshot_config_name(self, snapshot_name): |
| 371 | """Get config name for the environment""" |
| 372 | env_name = self.__config.hardware.heat_stack_name |
| 373 | if env_name is None: |
| 374 | env_name = 'config' |
| 375 | test_config_path = os.path.join( |
| 376 | settings.LOGS_DIR, '{0}_{1}.ini'.format(env_name, snapshot_name)) |
| 377 | return test_config_path |
| 378 | |
| 379 | def has_snapshot(self, name): |
| 380 | # Heat doesn't support live snapshots, so just |
| 381 | # check if an INI file was created for this environment, |
| 382 | # assuming that the environment has the configuration |
| 383 | # described in this INI. |
| 384 | return self.has_snapshot_config(name) |
| 385 | |
| 386 | def has_snapshot_config(self, name): |
| 387 | test_config_path = self._get_snapshot_config_name(name) |
| 388 | return os.path.isfile(test_config_path) |
| 389 | |
| 390 | def start(self, underlay_node_roles, timeout=480): |
| 391 | """Start environment""" |
| 392 | LOG.warning("HEAT Manager doesn't support start environment feature. " |
| 393 | "Waiting for finish the bootstrap process on the nodes " |
| 394 | "with accessible SSH") |
| 395 | |
| 396 | check_cloudinit_started = '[ -f /is_cloud_init_started ]' |
| 397 | check_cloudinit_finished = ('[ -f /is_cloud_init_finished ] || ' |
| 398 | '[ -f /var/log/mcp/.bootstrap_done ]') |
| 399 | check_cloudinit_failed = 'cat /is_cloud_init_failed' |
| 400 | passed = {} |
| 401 | for node in self._get_nodes_by_roles(roles=underlay_node_roles): |
| 402 | |
| 403 | try: |
| 404 | node_ip = self.node_ip(node) |
| 405 | except exceptions.EnvironmentNodeAccessError: |
| 406 | LOG.warning("Node {0} doesn't have accessible IP address" |
| 407 | ", skipping".format(node['name'])) |
| 408 | continue |
| 409 | |
| 410 | LOG.info("Waiting for SSH on node '{0}' / {1} ...".format( |
| 411 | node['name'], node_ip)) |
| 412 | |
| 413 | def _ssh_check(host, |
| 414 | port, |
| 415 | username=settings.SSH_NODE_CREDENTIALS['login'], |
| 416 | password=settings.SSH_NODE_CREDENTIALS['password'], |
| 417 | timeout=0): |
| 418 | try: |
| 419 | ssh = ssh_client.SSHClient( |
| 420 | host=host, port=port, |
| 421 | auth=ssh_client.SSHAuth( |
| 422 | username=username, |
| 423 | password=password)) |
| 424 | |
| 425 | # If '/is_cloud_init_started' exists, then wait for |
| 426 | # the flag /is_cloud_init_finished |
| 427 | if ssh.execute(check_cloudinit_started)['exit_code'] == 0: |
| 428 | result = ssh.execute(check_cloudinit_failed) |
| 429 | if result['exit_code'] == 0: |
| 430 | raise exceptions.EnvironmentNodeIsNotStarted( |
| 431 | "{0}:{1}".format(host, port), |
| 432 | result.stdout_str) |
| 433 | |
| 434 | status = ssh.execute( |
| 435 | check_cloudinit_finished)['exit_code'] == 0 |
| 436 | # Else, just wait for SSH |
| 437 | else: |
| 438 | status = ssh.execute('echo ok')['exit_code'] == 0 |
| 439 | return status |
| 440 | |
| 441 | except (AuthenticationException, BadAuthenticationType): |
| 442 | return True |
| 443 | except Exception: |
| 444 | return False |
| 445 | |
| 446 | def _ssh_wait(host, |
| 447 | port, |
| 448 | username=settings.SSH_NODE_CREDENTIALS['login'], |
| 449 | password=settings.SSH_NODE_CREDENTIALS['password'], |
| 450 | timeout=0): |
| 451 | |
| 452 | if host in passed and passed[host] >= 2: |
| 453 | # host already passed the check |
| 454 | return True |
| 455 | |
| 456 | for node in self._get_nodes_by_roles( |
| 457 | roles=underlay_node_roles): |
| 458 | ip = node_ip |
| 459 | if ip not in passed: |
| 460 | passed[ip] = 0 |
| 461 | if _ssh_check(ip, port): |
| 462 | passed[ip] += 1 |
| 463 | else: |
| 464 | passed[ip] = 0 |
| 465 | |
| 466 | helpers.wait( |
| 467 | lambda: _ssh_wait(node_ip, 22), |
| 468 | timeout=timeout, |
| 469 | timeout_msg="Node '{}' didn't open SSH in {} sec".format( |
| 470 | node['name'], timeout |
| 471 | ) |
| 472 | ) |
| 473 | LOG.info('Heat stack "{0}" ready' |
| 474 | .format(self.__config.hardware.heat_stack_name)) |
| 475 | |
| 476 | def _create_environment(self): |
| 477 | tpl_files, template = template_utils.get_template_contents( |
| 478 | self.__config.hardware.heat_conf_path) |
| 479 | env_files_list = [] |
| 480 | env_files, env = ( |
| 481 | template_utils.process_multiple_environments_and_files( |
| 482 | env_paths=[self.__config.hardware.heat_env_path], |
| 483 | env_list_tracker=env_files_list)) |
| 484 | |
| 485 | fields = { |
| 486 | 'stack_name': self.__config.hardware.heat_stack_name, |
| 487 | 'template': template, |
| 488 | 'files': dict(list(tpl_files.items()) + list(env_files.items())), |
| 489 | 'environment': env, |
Dennis Dmitriev | c902ad8 | 2019-04-12 13:41:30 +0300 | [diff] [blame^] | 490 | 'parameters': { |
| 491 | 'mcp_version': settings.MCP_VERSION, |
| 492 | 'env_name': settings.ENV_NAME, |
| 493 | } |
Dennis Dmitriev | f5f2e60 | 2017-11-03 15:36:19 +0200 | [diff] [blame] | 494 | } |
| 495 | |
| 496 | if env_files_list: |
| 497 | fields['environment_files'] = env_files_list |
| 498 | |
| 499 | self.__stacks.create(**fields) |
| 500 | self.wait_of_stack_status(EXPECTED_STACK_STATUS) |
| 501 | LOG.info("Stack '{0}' created" |
| 502 | .format(self.__config.hardware.heat_stack_name)) |
| 503 | |
| 504 | def stop(self): |
| 505 | """Stop environment""" |
| 506 | LOG.warning("HEAT Manager doesn't support stop environment feature") |
| 507 | pass |
| 508 | |
| 509 | # TODO(ddmitriev): add all Environment methods |
| 510 | @staticmethod |
| 511 | def node_ip(node, address_pool_name='admin-pool01'): |
| 512 | """Determine node's IP |
| 513 | |
| 514 | :param node: a dict element from the self._nodes |
| 515 | :return: string |
| 516 | """ |
| 517 | if address_pool_name in node['addresses']: |
| 518 | addr = node['addresses'][address_pool_name] |
| 519 | LOG.debug('{0} IP= {1}'.format(node['name'], addr)) |
| 520 | return addr |
| 521 | else: |
| 522 | raise exceptions.EnvironmentNodeAccessError( |
| 523 | node['name'], |
| 524 | "No addresses available for the subnet {0}" |
| 525 | .format(address_pool_name)) |
| 526 | |
| 527 | def set_address_pools_config(self): |
| 528 | """Store address pools CIDRs in config object""" |
| 529 | for ap in self._address_pools: |
| 530 | for role in ap['roles']: |
| 531 | self.__config.underlay.address_pools[role] = ap['cidr'] |
| 532 | |
| 533 | def set_dhcp_ranges_config(self): |
| 534 | """Store DHCP ranges in config object""" |
| 535 | for ap in self._address_pools: |
| 536 | for role in ap['roles']: |
| 537 | self.__config.underlay.dhcp_ranges[role] = { |
| 538 | "cidr": ap['cidr'], |
| 539 | "start": ap['start'], |
| 540 | "end": ap['end'], |
| 541 | "gateway": ap['gateway'], |
| 542 | } |
| 543 | |
    def wait_for_node_state(self, node_name, state, timeout):
        # Power-state management is not available for Heat-based envs
        raise NotImplementedError()

    def warm_shutdown_nodes(self, underlay, nodes_prefix, timeout=600):
        # Power-state management is not available for Heat-based envs
        raise NotImplementedError()

    def warm_restart_nodes(self, underlay, nodes_prefix, timeout=600):
        # Power-state management is not available for Heat-based envs
        raise NotImplementedError()
| 552 | |
    @property
    def slave_nodes(self):
        # Node objects are not exposed by the Heat-based manager
        raise NotImplementedError()