Add initial failover tests
* Warm restart of nodes by role
* Warm shutdown of nodes by role
* Run functional tests after the failure

TODO: assert the results of the functional tests run after the failure
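The new manager methods are meant to be driven from the test layer roughly as
in the sketch below (illustrative only: the openstack_actions fixture name and
the 'ctl' node subname are assumptions, not part of this change):

    def test_warm_restart_ctl_nodes(openstack_actions):
        # Reboot every node whose name contains 'ctl': graceful in-guest
        # shutdown, destroy the domain, then start it and wait for 'active'.
        openstack_actions.warm_restart_nodes('ctl')
        # The functional tests would run here; asserting their results is
        # the TODO above.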
Change-Id: I51934227340a60d5150e14f5004f2c3575264a36
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index c800cc2..8afb177 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -330,6 +330,39 @@
raise exceptions.EnvironmentIsNotSet()
self.__env.destroy()
+ def destroy_node(self, node_name):
+ """Destroy node"""
+ node = self.__env.get_node(name=node_name)
+ node.destroy()
+
+ def start_node(self, node_name):
+ """Start node"""
+ node = self.__env.get_node(name=node_name)
+ node.start()
+
+ def reboot_node(self, node_name):
+ """Reboot node"""
+ node = self.__env.get_node(name=node_name)
+ node.reboot()
+
+ def remove_node(self, node_name):
+ """Remove node"""
+ node = self.__env.get_node(name=node_name)
+ node.remove()
+
+ def wait_for_node_state(self, node_name, state, timeout):
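+        """Wait until the node is running ('active') or powered off"""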
+ node = self.__env.get_node(name=node_name)
+        if state == 'active':
+            helpers.wait(lambda: node.is_active(),
+                         timeout=timeout,
+                         timeout_msg=('Node {0} failed '
+                                      'to become active'.format(node_name)))
+        else:
+            helpers.wait(lambda: not node.is_active(),
+                         timeout=timeout,
+                         timeout_msg=('Node {0} failed '
+                                      'to go offline'.format(node_name)))
+
def has_snapshot(self, name):
return self.__env.has_snapshot(name)
@@ -353,6 +386,15 @@
LOG.debug('Trying to get nodes by role {0}'.format(node_role))
return self.__env.get_nodes(role=node_role)
+ def __get_nodes_by_name(self, node_name):
+        """Get nodes matching the given node name
+
+        :param node_name: string
+        :rtype: list of devops.models.Node
+        """
+        LOG.debug('Trying to get nodes by name {0}'.format(node_name))
+ return self.__env.get_nodes(name=node_name)
+
@property
def master_nodes(self):
"""Get all master nodes
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 6454bdd..ebcc574 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -24,10 +24,12 @@
__config = None
__underlay = None
+ __hardware = None
- def __init__(self, config, underlay, salt):
+ def __init__(self, config, underlay, hardware, salt):
self.__config = config
self.__underlay = underlay
+ self.__hardware = hardware
self._salt = salt
super(OpenstackManager, self).__init__(
config=config, underlay=underlay)
@@ -88,3 +90,34 @@
file_name = result['stdout'][0].rstrip()
LOG.debug("Found files {0}".format(file_name))
r.download(destination=file_name, target=os.getcwd())
+
+ def get_node_name_by_subname(self, node_sub_name):
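+        """Return all underlay node names that contain node_sub_name"""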
+ return [node_name for node_name
+ in self.__underlay.node_names()
+ if node_sub_name in node_name]
+
+ def warm_shutdown_openstack_nodes(self, node_sub_name, timeout=10 * 60):
+        """Gracefully shut down all nodes whose names contain node_sub_name"""
+ node_names = self.get_node_name_by_subname(node_sub_name)
+ LOG.info('Shutting down nodes {}'.format(node_names))
+ for node in node_names:
+ LOG.debug('Shutdown node {0}'.format(node))
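+            # 'shutdown +1' schedules an in-guest poweroff in one minute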
+ self.__underlay.check_call(cmd="shutdown +1", node_name=node)
+ for node in node_names:
+ LOG.info('Destroy node {}'.format(node))
+ self.__hardware.destroy_node(node)
+ self.__hardware.wait_for_node_state(
+ node, state='offline', timeout=timeout)
+
+ def warm_start_nodes(self, node_sub_name, timeout=10 * 60):
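+        """Power on nodes matching node_sub_name and wait until active"""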
+ node_names = self.get_node_name_by_subname(node_sub_name)
+ LOG.info('Starting nodes {}'.format(node_names))
+ for node in node_names:
+ self.__hardware.start_node(node)
+ self.__hardware.wait_for_node_state(
+ node, state='active', timeout=timeout)
+
+ def warm_restart_nodes(self, node_names, timeout=10 * 60):
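+        """Warm restart: gracefully shut nodes down, then start them again"""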
+ LOG.info('Reboot (warm restart) nodes {0}'.format(node_names))
+ self.warm_shutdown_openstack_nodes(node_names, timeout=timeout)
+        self.warm_start_nodes(node_names, timeout=timeout)
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 12ea22d..c9f2f4b 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -407,13 +407,17 @@
minion_nodes = [ssh for ssh in self.config_ssh
if node_role not in ssh['roles']]
for node in minion_nodes:
- with self.remote(host=node['host']) as r_node:
- r_node.check_call(('tar '
- '--absolute-names '
- '--warning=no-file-changed '
- '-czf {t} {d}'.format(
- t='{0}.tar.gz'.format(node['node_name']), d='/var/log')),
- verbose=True, raise_on_err=False)
+ try:
+ with self.remote(host=node['host']) as r_node:
+ r_node.check_call(('tar '
+ '--absolute-names '
+ '--warning=no-file-changed '
+ '-czf {t} {d}'.format(
+ t='{0}.tar.gz'.format(node['node_name']),
+ d='/var/log')),
+ verbose=True, raise_on_err=False)
+            except Exception:
+                LOG.warning("Cannot SSH to node {0}".format(node['node_name']))
with self.remote(master_node['node_name']) as r:
for node in minion_nodes:
packages_minion_cmd = ("salt '{0}*' cmd.run "