Add k8s keepalived failover test

Rewrite openstack_actions to remove its hardware dependency.
Rewrite conformance method to allow more flexibility.
Add a timeout after the keepalived restart workaround, because
keepalived sometimes did not have enough time to enter the MASTER
state before the test started.

Change-Id: I668844f91f4a8fb473e199977f1ebe6ca7ddc35a
Related-PROD: PROD-20878
diff --git a/tcp_tests/managers/ceph_manager.py b/tcp_tests/managers/ceph_manager.py
index bd68496..b2111be 100644
--- a/tcp_tests/managers/ceph_manager.py
+++ b/tcp_tests/managers/ceph_manager.py
@@ -23,12 +23,10 @@
 
     __config = None
     __underlay = None
-    __hardware = None
 
-    def __init__(self, config, underlay,  hardware, salt):
+    def __init__(self, config, underlay, salt):
         self.__config = config
         self.__underlay = underlay
-        self.__hardware = hardware
         self._salt = salt
         super(CephManager, self).__init__(
             config=config, underlay=underlay)
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index d02cff5..0e56756 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -420,6 +420,22 @@
                          timeout_msg=('Node {0} failed '
                                       'to become active'.format(node)))
 
+    def warm_shutdown_nodes(self, underlay, nodes_prefix, timeout=600):
+        node_names = underlay.get_target_node_names(nodes_prefix)
+        for node in node_names:
+            LOG.debug('Shutdown node {0}'.format(node))
+            underlay.check_call(cmd="shutdown +1", node_name=node)
+        for node in node_names:
+            self.wait_for_node_state(node, state='offline', timeout=timeout)
+
+    def warm_restart_nodes(self, underlay, nodes_prefix, timeout=600):
+        self.warm_shutdown_nodes(underlay, nodes_prefix, timeout)
+        node_names = underlay.get_target_node_names(nodes_prefix)
+        for node in node_names:
+            LOG.debug('Starting node {0}'.format(node))
+            self.start_node(node)
+            self.wait_for_node_state(node, state='active', timeout=timeout)
+
     def has_snapshot(self, name):
         return self.__env.has_snapshot(name)
 
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
index b543c87..c4bb57e 100644
--- a/tcp_tests/managers/envmanager_empty.py
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -104,3 +104,11 @@
     def delete_environment(self):
         """Delete environment"""
         pass
+
+    def warm_shutdown_nodes(self, underlay, nodes_prefix, timeout=600):
+        raise Exception(
+            "Node shutdown method unsupported on this environment manager")
+
+    def warm_restart_nodes(self, underlay, nodes_prefix, timeout=600):
+        raise Exception(
+            "Node restart method unsupported on this environment manager")
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 1a6144f..38521c7 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -343,15 +343,17 @@
         return sum(pods)
 
     def run_conformance(self, timeout=60 * 60, log_out='k8s_conformance.log',
-                        raise_on_err=True):
-        with self.__underlay.remote(
-                node_name=self.ctl_host) as remote:
-            result = remote.check_call(
-                "set -o pipefail; docker run --net=host -e API_SERVER="
-                "'http://127.0.0.1:8080' {0} | tee {1}".format(
-                    self.__config.k8s.k8s_conformance_image, log_out),
-                timeout=timeout, raise_on_err=raise_on_err)['stdout']
-            return result
+                        raise_on_err=True, node_name=None,
+                        api_server='http://127.0.0.1:8080'):
+        if node_name is None:
+            node_name = self.ctl_host
+        cmd = "set -o pipefail; docker run --net=host -e API_SERVER="\
+              "'{api}' {image} | tee '{log}'".format(
+               api=api_server, image=self.__config.k8s.k8s_conformance_image,
+               log=log_out)
+        return self.__underlay.check_call(
+               cmd=cmd, node_name=node_name, timeout=timeout,
+               raise_on_err=raise_on_err)
 
     def get_k8s_masters(self):
         k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master',
@@ -704,3 +706,14 @@
         update_commands = self.__underlay.read_template(steps_path)
         self.execute_commands(
             update_commands, label="Updating kubernetes to '{}'".format(tag))
+
+    def get_keepalived_vip(self):
+        """
+        Return k8s VIP IP address
+
+        :return: str, IP address
+        """
+        ctl_vip_pillar = self._salt.get_pillar(
+            tgt="I@kubernetes:control:enabled:True",
+            pillar="_param:cluster_vip_address")[0]
+        return [vip for minion_id, vip in ctl_vip_pillar.items()][0]
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 464dc56..d47aceb 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -26,12 +26,10 @@
 
     __config = None
     __underlay = None
-    __hardware = None
 
-    def __init__(self, config, underlay,  hardware, salt):
+    def __init__(self, config, underlay, salt):
         self.__config = config
         self.__underlay = underlay
-        self.__hardware = hardware
         self._salt = salt
         super(OpenstackManager, self).__init__(
             config=config, underlay=underlay)
@@ -164,37 +162,6 @@
             LOG.debug("Found files {0}".format(file_name))
             r.download(destination=file_name, target=os.getcwd())
 
-    def get_node_name_by_subname(self, node_sub_name):
-        return [node_name for node_name
-                in self.__underlay.node_names()
-                if node_sub_name in node_name]
-
-    def warm_shutdown_openstack_nodes(self, node_sub_name, timeout=10 * 60):
-        """Gracefully shutting down the node  """
-        node_names = self.get_node_name_by_subname(node_sub_name)
-        LOG.info('Shutting down nodes {}'.format(node_names))
-        for node in node_names:
-            LOG.debug('Shutdown node {0}'.format(node))
-            self.__underlay.check_call(cmd="shutdown +1", node_name=node)
-        for node in node_names:
-            LOG.info('Destroy node {}'.format(node))
-            self.__hardware.destroy_node(node)
-            self.__hardware.wait_for_node_state(
-                node, state='offline', timeout=timeout)
-
-    def warm_start_nodes(self, node_sub_name, timeout=10 * 60):
-        node_names = self.get_node_name_by_subname(node_sub_name)
-        LOG.info('Starting nodes {}'.format(node_names))
-        for node in node_names:
-            self.__hardware.start_node(node)
-            self.__hardware.wait_for_node_state(
-                node, state='active', timeout=timeout)
-
-    def warm_restart_nodes(self, node_names, timeout=10 * 60):
-        LOG.info('Reboot (warm restart) nodes {0}'.format(node_names))
-        self.warm_shutdown_openstack_nodes(node_names, timeout=timeout)
-        self.warm_start_nodes(node_names)
-
     def auth_in_horizon(self, host, port, user, password):
         client = requests.session()
         url = "http://{0}:{1}".format(