Add test for running a Virtlet VM

Added a system test that runs a VM inside a Virtlet pod.
Refactored the deploy config for the Virtlet pod.
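
For illustration, the new VirtletManager helpers are meant to be chained
roughly as in the hypothetical test sketched below (it assumes the
virtlet_actions fixture yields the manager instance added by this change):

    def test_vm_lifecycle(virtlet_actions):
        # Create a cirros VM pod with a unique generated name
        vm_name = virtlet_actions.run_vm()
        # Poll 'kubectl get po' until the pod phase becomes Running
        virtlet_actions.wait_active_state(vm_name, timeout=180)
        # Delete the pod and wait until the API server reports NotFound
        virtlet_actions.delete_vm(vm_name)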

Change-Id: I38d51b18030c9147168f6d0b2bd815c56b021231
Reviewed-on: https://review.gerrithub.io/364662
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 518f483..3d11d6e 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -26,6 +26,7 @@
 UNDERLAY_NODE_ROLES = enum(
     'salt_master',
     'salt_minion',
+    'k8s_virtlet',
 )
 
 NETWORK_TYPE = enum(
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 3801436..f16be6e 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -48,15 +48,26 @@
 
     def get_proxy_api(self):
         k8s_proxy_ip_pillars = self._salt.get_pillar(
-            tgt='I@haproxy:proxy:enabled:true',
+            tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
             pillar='haproxy:proxy:listen:k8s_secure:binds:address')
+        k8s_hosts = self._salt.get_pillar(
+            tgt='I@haproxy:proxy:enabled:true and I@kubernetes:master',
+            pillar='kubernetes:pool:apiserver:host')
         k8s_proxy_ip = set([ip
                             for item in k8s_proxy_ip_pillars
-                            for node,ip in item.items()])
-        assert len(k8s_proxy_ip) == 1, \
-            ("Found {0} Kubernetes endpoints in pillars,"
-             " expected one!").format(len(k8s_proxy_ip))
-        return k8s_proxy_ip.pop()
+                            for node, ip in item.items() if ip])
+        k8s_hosts = set([ip
+                         for item in k8s_hosts
+                         for node, ip in item.items() if ip])
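+        # The single API host advertised in kubernetes:pool:apiserver:host
+        # must be one of the haproxy bind addresses on the K8s master nodes.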
+        assert len(k8s_hosts) == 1, (
+            "Expected exactly one Kubernetes API host in pillars, "
+            "found: {0}").format(k8s_hosts)
+        k8s_host = k8s_hosts.pop()
+        assert k8s_host in k8s_proxy_ip, (
+            "Kubernetes API host {0} not found in proxies {1} on the K8s "
+            "master nodes; K8s proxies are expected to run on the "
+            "nodes with the K8s master role").format(k8s_host, k8s_proxy_ip)
+        return k8s_host
 
     @property
     def api(self):
diff --git a/tcp_tests/managers/virtlet_manager.py b/tcp_tests/managers/virtlet_manager.py
index f8d1ced..2d126e0 100644
--- a/tcp_tests/managers/virtlet_manager.py
+++ b/tcp_tests/managers/virtlet_manager.py
@@ -12,8 +12,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from uuid import uuid4
+
+from tcp_tests.helpers import ext
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 
+from devops.helpers import helpers
+
 
 class VirtletManager(ExecuteCommandsMixin):
     """docstring for VirtletManager"""
@@ -24,6 +29,9 @@
     def __init__(self, config, underlay):
         self.__config = config
         self.__underlay = underlay
+        self.virtlet_nodes = [
+            i for i in self.__config.underlay.ssh
+            if ext.UNDERLAY_NODE_ROLES.k8s_virtlet in i['roles']]
         super(VirtletManager, self).__init__(
             config=config, underlay=underlay)
 
@@ -31,3 +39,45 @@
         self.execute_commands(commands,
                               label='Install Virtlet project')
         self.__config.virtlet.virtlet_installed = True
+
+    def run_vm(self, name=None):
+        if not name:
+            name = 'virtlet-vm-{}'.format(uuid4())
+        virt_node = self.virtlet_nodes[0]
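+        # Convert the example cirros-vm.yaml to JSON, set a unique
+        # .metadata.name with jq, and pipe the result to `kubectl create`.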
+        cmd = (
+            "kubectl convert -f virtlet/examples/cirros-vm.yaml --local "
+            "-o json | jq '.metadata.name|=\"{}\"' | kubectl create -f -")
+        self.__underlay.check_call(
+            cmd.format(name),
+            node_name=virt_node['node_name'])
+        return name
+
+    def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
+        virt_node = self.virtlet_nodes[0]
+        cmd = "kubectl get po {} -n default".format(name)
+        if jsonpath:
+            cmd += " -o jsonpath={}".format(jsonpath)
+        return self.__underlay.check_call(
+            cmd, node_name=virt_node['node_name'], expected=expected)
+
+    def wait_active_state(self, name, timeout=180):
+        helpers.wait(
+            lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
+            timeout=timeout,
+            timeout_msg="VM {} didn't Running state in {} sec. "
+                        "Current state: ".format(
+                name, timeout, self.get_vm_info(name)['stdout'][0]))
+
+    def delete_vm(self, name, timeout=180):
+        virt_node = self.virtlet_nodes[0]
+        cmd = "kubectl delete po -n default {}".format(name)
+        self.__underlay.check_call(cmd, node_name=virt_node['node_name'])
+
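+        # The pod is considered gone once `kubectl get po <name>` fails with
+        # "Error from server (NotFound)"; expected=[0, 1] lets the call
+        # tolerate the non-zero exit code while polling.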
+        helpers.wait(
+            lambda:
+            "Error from server (NotFound):" in
+            " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
+            timeout=timeout,
+            timeout_msg="VM {} didn't Running state in {} sec. "
+                        "Current state: ".format(
+                name, timeout, self.get_vm_info(name)['stdout'][0]))
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index a932296..77ed1b0 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -79,7 +79,8 @@
     ct.Cfg('roles', ct.JSONList(),
            help="Node roles managed by underlay in the environment",
            default=[ext.UNDERLAY_NODE_ROLES.salt_master,
-                    ext.UNDERLAY_NODE_ROLES.salt_minion, ]),
+                    ext.UNDERLAY_NODE_ROLES.salt_minion,
+                    ext.UNDERLAY_NODE_ROLES.k8s_virtlet, ]),
     ct.Cfg('bootstrap_timeout', ct.Integer(),
            help="Timeout of waiting SSH for nodes with specified roles",
            default=480),
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
index 2434579..985110c 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
@@ -181,7 +181,7 @@
                     - private
 
           - name: {{ HOSTNAME_CTL02 }}
-            role: salt_minion
+            role: k8s_virtlet
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 4
               memory: !os_env SLAVE_NODE_MEMORY, 2048
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml
index 4f2abb3..6e66fc4 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml
@@ -1,4 +1,5 @@
 {% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Clone virtlet project from git to the ctl01 node for start virtlet pod from yaml
 - description: Cloning virtlet project on ctl02
@@ -14,6 +15,21 @@
   retry: {count: 1, delay: 1}
   skip_fail: false
 
+# Add route for internal kube-services if necessary
+- description: Adding route for internal kube-services if necessary
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run "ip r | grep 10.254 || ip ro add 10.254.0.0/16 dev ens4"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+
+# Install jq
+- description: Install jq
+  cmd: apt-get install jq -y
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 # Add extra parameter for kubelet service on virtlet node
 - description: Adding extra parameter for kubelet service on virtlet node
   cmd:  sed -i.bak "s|^\"|--feature-gates=DynamicKubeletConfig=true \\\\\\n\"|" /etc/default/kubelet
@@ -21,16 +37,19 @@
   retry: {count: 1, delay: 1}
   skip_fail: false
 
-# Restart kubelet and kube-api services on virtlet node
-- description: Restart kubelet and kube-api services on ctl02
-  cmd:  systemctl restart kube-apiserver kubelet
+# Restart kubelet service on virtlet node
+- description: Restart kubelet service on ctl02
+  cmd:  systemctl restart kubelet
   node_name: {{ HOSTNAME_CTL02 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
 
 # Create virtlet pod
 - description: Creating virtlet pod
-  cmd:  kubectl create -f virtlet/deploy/virtlet-ds.yaml
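+  # The stock virtlet-ds.yaml is converted to JSON and patched with jq:
+  # VIRTLET_DISABLE_KVM=y is set (nested KVM may be unavailable on the
+  # virtual nodes), and /etc/kubernetes and /var/lib/etcd are mounted
+  # into the virtlet container before the DaemonSet is created.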
+  cmd: |
+    kubectl convert -f virtlet/deploy/virtlet-ds.yaml --local -o json | jq \
+    '.items[0].spec.template.spec.containers[0].env|=.+[{"name":"VIRTLET_DISABLE_KVM","value":"y"}] | .items[0].spec.template.spec.volumes|=.+[{"name":"etcd","hostPath":{"path":"/var/lib/etcd"}},{"name":"kubernetes","hostPath":{"path":"/etc/kubernetes"}}] | .items[0].spec.template.spec.containers[0].volumeMounts|=.+[{"mountPath":"/etc/kubernetes","name":"kubernetes"},{"mountPath":"/var/lib/etcd","name":"etcd"}]' | \
+    kubectl create -f -
   node_name: {{ HOSTNAME_CTL02 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
new file mode 100644
index 0000000..523aceb
--- /dev/null
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -0,0 +1,48 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+import time
+
+import pytest
+
+from tcp_tests import settings
+from tcp_tests.helpers import ext
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestVirtletActions(object):
+    """Test class for testing Virtlet actions"""
+
+    #salt_cmd = 'salt -l debug '  # For debug output
+    #salt_call_cmd = 'salt-call -l debug '  # For debug output
+    salt_cmd = 'salt --hard-crash --state-output=mixed --state-verbose=False '  # For cause only output
+    salt_call_cmd = 'salt-call --hard-crash --state-output=mixed --state-verbose=False '  # For cause only output
+    #salt_cmd = 'salt --state-output=terse --state-verbose=False '  # For reduced output
+    #salt_call_cmd = 'salt-call --state-output=terse --state-verbose=False '  # For reduced output
+
+    def test_virtlet_create_delete_vm(self, underlay, virtlet_deployed,
+                                     show_step, virtlet_actions):
+        """Test for deploying an mcp environment with virtlet
+
+        Scenario:
+            1. Start a VM as a Virtlet pod
+            2. Wait for the VM to reach the Running state
+            3. Delete the VM and wait until the pod is removed
+
+        """
+        show_step(1)
+        vm_name = virtlet_actions.run_vm()
+        show_step(2)
+        virtlet_actions.wait_active_state(vm_name)
+        show_step(3)
+        virtlet_actions.delete_vm(vm_name)