Add test for running a Virtlet VM

Added a test that runs a VM inside a Virtlet pod
Refactored the deploy config for the Virtlet pod
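
A rough sketch of how the new VirtletManager helpers are meant to be used
from a test (the fixture and test names below are illustrative, not the
actual test code):

    def test_vm_k8s_virtlet(virtlet_actions):
        # Start a VM from the bundled cirros-vm.yaml example,
        # under a generated unique pod name
        vm_name = virtlet_actions.run_vm()
        # Block until the pod backing the VM reports the 'Running' phase
        virtlet_actions.wait_active_state(vm_name)
        # Delete the pod and wait until kubectl reports NotFound
        virtlet_actions.delete_vm(vm_name)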

Change-Id: I38d51b18030c9147168f6d0b2bd815c56b021231
Reviewed-on: https://review.gerrithub.io/364662
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 3801436..f16be6e 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -48,15 +48,26 @@
 
     def get_proxy_api(self):
         k8s_proxy_ip_pillars = self._salt.get_pillar(
-            tgt='I@haproxy:proxy:enabled:true',
+            tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
             pillar='haproxy:proxy:listen:k8s_secure:binds:address')
+        k8s_hosts = self._salt.get_pillar(
+            tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
+            pillar='kubernetes:pool:apiserver:host')
         k8s_proxy_ip = set([ip
                             for item in k8s_proxy_ip_pillars
-                            for node,ip in item.items()])
-        assert len(k8s_proxy_ip) == 1, \
-            ("Found {0} Kubernetes endpoints in pillars,"
-             " expected one!").format(len(k8s_proxy_ip))
-        return k8s_proxy_ip.pop()
+                            for node, ip in item.items() if ip])
+        k8s_hosts = set([ip
+                         for item in k8s_hosts
+                         for node, ip in item.items() if ip])
+        assert len(k8s_hosts) == 1, (
+            "Expected exactly one Kubernetes API host in pillars, "
+            "found: {0}").format(k8s_hosts)
+        k8s_host = k8s_hosts.pop()
+        assert k8s_host in k8s_proxy_ip, (
+            "Kubernetes API host {0} not found in proxies {1} "
+            "on K8s master nodes. K8s proxies are expected "
+            "on the nodes with K8s master").format(k8s_host, k8s_proxy_ip)
+        return k8s_host
 
     @property
     def api(self):
diff --git a/tcp_tests/managers/virtlet_manager.py b/tcp_tests/managers/virtlet_manager.py
index f8d1ced..2d126e0 100644
--- a/tcp_tests/managers/virtlet_manager.py
+++ b/tcp_tests/managers/virtlet_manager.py
@@ -12,8 +12,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from uuid import uuid4
+
+from tcp_tests.helpers import ext
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 
+from devops.helpers import helpers
+
 
 class VirtletManager(ExecuteCommandsMixin):
     """docstring for VirtletManager"""
@@ -24,6 +29,9 @@
     def __init__(self, config, underlay):
         self.__config = config
         self.__underlay = underlay
+        self.virtlet_nodes = [
+            i for i in self.__config.underlay.ssh
+            if ext.UNDERLAY_NODE_ROLES.virtlet_node in i['roles']]
         super(VirtletManager, self).__init__(
             config=config, underlay=underlay)
 
@@ -31,3 +39,45 @@
         self.execute_commands(commands,
                               label='Install Virtlet project')
         self.__config.virtlet.virtlet_installed = True
+
+    def run_vm(self, name=None):
+        if not name:
+            name = 'virtlet-vm-{}'.format(uuid4())
+        virt_node = self.virtlet_nodes[0]
+        cmd = (
+            "kubectl convert -f virtlet/examples/cirros-vm.yaml --local "
+            "-o json | jq '.metadata.name|=\"{}\"' | kubectl create -f -")
+        self.__underlay.check_call(
+            cmd.format(name),
+            node_name=virt_node['node_name'])
+        return name
+
+    def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
+        virt_node = self.virtlet_nodes[0]
+        cmd = "kubectl get po {} -n default".format(name)
+        if jsonpath:
+            cmd += " -o jsonpath={}".format(jsonpath)
+        return self.__underlay.check_call(
+            cmd, node_name=virt_node['node_name'], expected=expected)
+
+    def wait_active_state(self, name, timeout=180):
+        helpers.wait(
+            lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
+            timeout=timeout,
+            timeout_msg="VM {} didn't reach Running state in {} sec. "
+                        "Current state: {}".format(
+                name, timeout, self.get_vm_info(name)['stdout'][0]))
+
+    def delete_vm(self, name, timeout=180):
+        virt_node = self.virtlet_nodes[0]
+        cmd = "kubectl delete po -n default {}".format(name)
+        self.__underlay.check_call(cmd, node_name=virt_node['node_name'])
+
+        helpers.wait(
+            lambda:
+            "Error from server (NotFound):" in
+            " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
+            timeout=timeout,
+            timeout_msg="VM {} was not deleted in {} sec. "
+                        "Current state: {}".format(
+                name, timeout, self.get_vm_info(name)['stdout'][0]))