[virtlet] Refactor virtlet part

- Remove virtlet roles
- Remove virtlet fixtures, since virtlet is now officially installed
  as an addon when the corresponding option is enabled
- Move the core virtlet test helper functions into k8smanager.py
- Add show_step calls to the virtlet tests
- Add a check at the beginning of each test that the virtlet addon
  is enabled

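For reference, a minimal sketch of how the relocated helpers are expected
to be driven from a system test (the test name is illustrative; it assumes
the k8s_deployed fixture returns the K8SManager instance, as in
test_virtlet_actions.py below):

    def test_virtlet_vm_lifecycle(show_step, config, k8s_deployed):
        if not config.k8s_deploy.kubernetes_virtlet_enabled:
            pytest.skip("Test requires Virtlet addon enabled")
        k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
                               '~/virtlet')
        k8s_deployed.install_jq()
        vm_name = k8s_deployed.run_vm()          # pod named virtlet-vm-<uuid>
        k8s_deployed.wait_active_state(vm_name)  # wait for the Running phase
        k8s_deployed.delete_vm(vm_name)          # delete, wait for NotFound
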
Change-Id: Ibfb499b29727b367afafc68fa13199db13bcccdf
diff --git a/tcp_tests/fixtures/virtlet_ceph_fixtures.py b/tcp_tests/fixtures/virtlet_ceph_fixtures.py
deleted file mode 100644
index a9cd3db..0000000
--- a/tcp_tests/fixtures/virtlet_ceph_fixtures.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#    Copyright 2017 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import pytest
-
-from tcp_tests import logger
-from tcp_tests.helpers import ext
-from tcp_tests.managers import virtlet_ceph_manager
-
-LOG = logger.logger
-
-
-@pytest.fixture(scope='function')
-def virtlet_ceph_actions(config, underlay):
-    """Fixture that provides various actions for Virtlet project
-
-    :param config: fixture provides oslo.config
-    :param underlay: fixture provides underlay manager
-    :rtype: VirtletCephManager
-    """
-    return virtlet_ceph_manager.VirtletCephManager(config, underlay)
-
-
-@pytest.mark.revert_snapshot(ext.SNAPSHOT.virtlet_ceph_deployed)
-@pytest.fixture(scope='function')
-def virtlet_ceph_deployed(revert_snapshot, config, hardware, underlay,
-                          virtlet_deployed, virtlet_ceph_actions):
-    """Fixture to get or install Virtlet project on the environment
-
-    :param revert_snapshot: fixture that reverts snapshot that is specified
-                            in test with @pytest.mark.revert_snapshot(<name>)
-    :param config: fixture provides oslo.config
-    :param hardware: fixture provides enviromnet manager
-    :param underlay: fixture provides underlay manager
-    :param virtlet_deployed: fixture provides VirtletManager instance
-    :param virtlet_ceph_actions: fixture provides VirtletCephManager
-    :rtype: VirtletCephManager
-
-    If config.virtlet.ceph_installed is not set, this
-    fixture assumes that the One-node Ceph for Virtlet was not installed,
-    and do the following:
-    - install One-node Ceph cluster to the desired node
-    - make snapshot with name 'virtlet_ceph_deployed'
-    - return VirtletCephManager
-
-    If config.virtlet.ceph_installed was set, this fixture assumes that
-    the One-node Ceph cluster was already installed, and do the following:
-    - return VirtletCephManager instance
-
-    If you want to revert 'virtlet_ceph_deployed' snapshot, please use mark:
-    @pytest.mark.revert_snapshot("virtlet_ceph_deployed")
-    """
-    # Deploy Virtlet with Ceph for Kubernetes
-    if not config.virtlet.ceph_installed:
-        steps_path = config.virtlet_deploy.virtlet_ceph_steps_path
-        commands = underlay.read_template(steps_path)
-        virtlet_ceph_actions.install(commands)
-        hardware.create_snapshot(ext.SNAPSHOT.virtlet_ceph_deployed)
-
-    else:
-        # 1. hardware environment created and powered on
-        # 2. config.underlay.ssh contains SSH access to provisioned nodes
-        #    (can be passed from external config with TESTS_CONFIGS variable)
-        # 3. config.tcp.* options contain access credentials to the already
-        #    installed TCP API endpoint
-        pass
-
-    return virtlet_ceph_actions
diff --git a/tcp_tests/fixtures/virtlet_fixtures.py b/tcp_tests/fixtures/virtlet_fixtures.py
deleted file mode 100644
index 301800e..0000000
--- a/tcp_tests/fixtures/virtlet_fixtures.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#    Copyright 2017 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import pytest
-
-from tcp_tests import logger
-from tcp_tests.helpers import ext
-from tcp_tests.managers import virtlet_manager
-
-LOG = logger.logger
-
-
-@pytest.fixture(scope='function')
-def virtlet_actions(config, underlay):
-    """Fixture that provides various actions for Virtlet project
-
-    :param config: fixture provides oslo.config
-    :param underlay: fixture provides underlay manager
-    :rtype: VirtletManager
-    """
-    return virtlet_manager.VirtletManager(config, underlay)
-
-
-@pytest.mark.revert_snapshot(ext.SNAPSHOT.virtlet_deployed)
-@pytest.fixture(scope='function')
-def virtlet_deployed(revert_snapshot, config, hardware, underlay,
-                     k8s_deployed, virtlet_actions):
-    """Fixture to get or install Virtlet project on the environment
-
-    :param revert_snapshot: fixture that reverts snapshot that is specified
-                            in test with @pytest.mark.revert_snapshot(<name>)
-    :param config: fixture provides oslo.config
-    :param hardware: fixture provides enviromnet manager
-    :param underlay: fixture provides underlay manager
-    :param k8s_deployed: fixture provides K8SManager instance
-    :param virtlet_actions: fixture provides VirtletManager instance
-    :rtype: VirtletManager
-
-    If config.virtlet.virtlet_installed is not set, this
-    fixture assumes that the Virtlet project was not installed,
-    and do the following:
-    - install Virtlet project
-    - make snapshot with name 'virtlet_deployed'
-    - return VirtletManager
-
-    If config.virtlet.virtlet_installed was set, this fixture assumes that
-    the Virtlet project was already installed, and do the following:
-    - return VirtletManager instance
-
-    If you want to revert 'virtlet_deployed' snapshot, please use mark:
-    @pytest.mark.revert_snapshot("virtlet_deployed")
-    """
-    # Deploy Virtlet for Kubernetes
-    if not config.virtlet.virtlet_installed:
-        steps_path = config.virtlet_deploy.virtlet_steps_path
-        commands = underlay.read_template(steps_path)
-        virtlet_actions.install(commands)
-        hardware.create_snapshot(ext.SNAPSHOT.virtlet_deployed)
-
-    else:
-        # 1. hardware environment created and powered on
-        # 2. config.underlay.ssh contains SSH access to provisioned nodes
-        #    (can be passed from external config with TESTS_CONFIGS variable)
-        # 3. config.tcp.* options contain access credentials to the already
-        #    installed TCP API endpoint
-        pass
-
-    return virtlet_actions
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 87e3eaf..12dd2dc 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 import time
+from uuid import uuid4
 
 import yaml
 
@@ -399,3 +400,98 @@
                 node_name=self.ctl_host) as remote:
             remote.check_call("nslookup {0} {1}".format(host, src))
 
+# ---------------------------- Virtlet methods -------------------------------
+    def install_jq(self):
+        """Install the jq JSON processor on the node.
+
+        Required for patching the example manifests on the fly.
+        """
+        cmd = "apt-get install -y jq"
+        return self.__underlay.check_call(cmd, node_name=self.ctl_host)
+
+    def git_clone(self, project, target):
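+        """Clone a git repository to the given path on the ctl node."""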
+        cmd = "git clone {0} {1}".format(project, target)
+        return self.__underlay.check_call(cmd, node_name=self.ctl_host)
+
+    def run_vm(self, name=None, yaml_path='~/virtlet/examples/cirros-vm.yaml'):
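+        """Create a Virtlet VM pod from a yaml example.
+
+        The manifest is converted to JSON, its metadata.name is set
+        to a unique value with jq, and the result is passed to
+        'kubectl create'. Returns the generated pod name.
+        """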
+        if not name:
+            name = 'virtlet-vm-{}'.format(uuid4())
+        cmd = (
+            "kubectl convert -f {0} --local "
+            "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -")
+        self.__underlay.check_call(cmd.format(yaml_path, name),
+                                   node_name=self.ctl_host)
+        return name
+
+    def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
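+        """Get 'kubectl get po' data for the VM pod (phase by default)."""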
+        cmd = "kubectl get po {} -n default".format(name)
+        if jsonpath:
+            cmd += " -o jsonpath={}".format(jsonpath)
+        return self.__underlay.check_call(
+            cmd, node_name=self.ctl_host, expected=expected)
+
+    def wait_active_state(self, name, timeout=180):
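+        """Wait until the VM pod reaches the 'Running' phase."""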
+        helpers.wait(
+            lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
+            timeout=timeout,
+            timeout_msg="VM {} didn't reach Running state in {} sec. "
+                        "Current state: {}".format(
+                name, timeout, self.get_vm_info(name)['stdout'][0]))
+
+    def delete_vm(self, name, timeout=180):
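+        """Delete the VM pod and wait until it is reported NotFound."""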
+        cmd = "kubectl delete po -n default {}".format(name)
+        self.__underlay.check_call(cmd, node_name=self.ctl_host)
+
+        helpers.wait(
+            lambda:
+            "Error from server (NotFound):" in
+            " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
+            timeout=timeout,
+            timeout_msg="VM {} was not deleted in {} sec. "
+                        "Current state: {}".format(
+                name, timeout, self.get_vm_info(name)['stdout'][0]))
+
+    def adjust_cirros_resources(
+            self, cpu=2, memory='256',
+            target_yaml='virtlet/examples/cirros-vm-exp.yaml'):
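+        """Copy the cirros-vm example and patch its vCPU/memory values.
+
+        The patched manifest is written to target_yaml, leaving the
+        original example untouched.
+        """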
+        # These values must be updated if the upstream example changes
+        cmd = ("cd ~/virtlet/examples && "
+               "cp cirros-vm.yaml {2} && "
+               r"sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/"
+               r"\1VirtletVCPUCount: \"{0}\"/' {2} && "
+               r"sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: "
+               "{1}Mi/' {2}".format(cpu, memory, target_yaml))
+        self.__underlay.check_call(cmd, node_name=self.ctl_host)
+
+    def get_domain_name(self, vm_name):
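+        """Return the libvirt domain name for a VM pod."""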
+        cmd = ("~/virtlet/examples/virsh.sh list --name | "
+               "grep -i {0} ".format(vm_name))
+        result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
+        return result['stdout'].strip()
+
+    def get_vm_cpu_count(self, domain_name):
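+        """Return the vCPU count of a domain, parsed from its XML."""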
+        cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
+               "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name))
+        result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
+        return int(result['stdout'].strip())
+
+    def get_vm_memory_count(self, domain_name):
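+        """Return the domain memory size in KiB, parsed from its XML."""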
+        cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
+               "grep 'memory unit' | "
+               "grep -o '[[:digit:]]*'".format(domain_name))
+        result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
+        return int(result['stdout'].strip())
+
+    def get_domain_id(self, domain_name):
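+        """Return the numeric libvirt id of a running domain."""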
+        cmd = ("virsh dumpxml {} | grep \"id='\" | "
+               "grep -o '[[:digit:]]*'".format(domain_name))
+        result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
+        return int(result['stdout'].strip())
+
+    def list_vm_volumes(self, domain_name):
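+        """Return volume sources attached to a domain (virsh domblklist)."""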
+        domain_id = self.get_domain_id(domain_name)
+        cmd = ("~/virtlet/examples/virsh.sh domblklist {} | "
+               "tail -n +3 | awk {{'print $2'}}".format(domain_id))
+        result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
+        return result['stdout'].strip()
\ No newline at end of file
diff --git a/tcp_tests/managers/virtlet_ceph_manager.py b/tcp_tests/managers/virtlet_ceph_manager.py
deleted file mode 100644
index 28f3dad..0000000
--- a/tcp_tests/managers/virtlet_ceph_manager.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#    Copyright 2017 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
-
-
-class VirtletCephManager(ExecuteCommandsMixin):
-    """docstring for VirtletCephManager"""
-
-    __config = None
-    __underlay = None
-
-    def __init__(self, config, underlay):
-        self.__config = config
-        self.__underlay = underlay
-        super(VirtletCephManager, self).__init__(
-            config=config, underlay=underlay)
-
-    def install(self, commands):
-        self.execute_commands(
-            commands,
-            label='Install One-node Ceph cluster in docker container')
-        self.__config.virtlet.ceph_installed = True
diff --git a/tcp_tests/managers/virtlet_manager.py b/tcp_tests/managers/virtlet_manager.py
deleted file mode 100644
index f2a05ed..0000000
--- a/tcp_tests/managers/virtlet_manager.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#    Copyright 2017 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import uuid4
-
-from tcp_tests.helpers import ext
-from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
-
-from devops.helpers import helpers
-
-
-class VirtletManager(ExecuteCommandsMixin):
-    """docstring for VirtletManager"""
-
-    __config = None
-    __underlay = None
-
-    def __init__(self, config, underlay):
-        self.__config = config
-        self.__underlay = underlay
-        self.virtlet_nodes = [
-            i for i in self.__config.underlay.ssh
-            if ext.UNDERLAY_NODE_ROLES.k8s_virtlet in i['roles']]
-        super(VirtletManager, self).__init__(
-            config=config, underlay=underlay)
-
-    def install(self, commands):
-        self.execute_commands(commands,
-                              label='Install Virtlet project')
-        self.__config.virtlet.virtlet_installed = True
-
-    def run_vm(self, name=None, yaml_path='virtlet/examples/cirros-vm.yaml'):
-        if not name:
-            name = 'virtlet-vm-{}'.format(uuid4())
-        virt_node = self.virtlet_nodes[0]
-        cmd = (
-            "kubectl convert -f {0} --local "
-            "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -")
-        self.__underlay.check_call(
-            cmd.format(name, yaml_path),
-            node_name=virt_node['node_name'])
-        return name
-
-    def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
-        virt_node = self.virtlet_nodes[0]
-        cmd = "kubectl get po {} -n default".format(name)
-        if jsonpath:
-            cmd += " -o jsonpath={}".format(jsonpath)
-        return self.__underlay.check_call(
-            cmd, node_name=virt_node['node_name'], expected=expected)
-
-    def wait_active_state(self, name, timeout=180):
-        helpers.wait(
-            lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
-            timeout=timeout,
-            timeout_msg="VM {} didn't Running state in {} sec. "
-                        "Current state: ".format(
-                name, timeout, self.get_vm_info(name)['stdout'][0]))
-
-    def delete_vm(self, name, timeout=180):
-        virt_node = self.virtlet_nodes[0]
-        cmd = "kubectl delete po -n default {}".format(name)
-        self.__underlay.check_call(cmd, node_name=virt_node['node_name'])
-
-        helpers.wait(
-            lambda:
-            "Error from server (NotFound):" in
-            " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
-            timeout=timeout,
-            timeout_msg="VM {} didn't Running state in {} sec. "
-                        "Current state: ".format(
-                name, timeout, self.get_vm_info(name)['stdout'][0]))
-
-    def adjust_cirros_resources(
-            self, cpu=2, memory='256',
-            target_yaml='virtlet/examples/cirros-vm-exp.yaml'):
-        virt_node = self.virtlet_nodes[0]
-        # We will need to change params in case of example change
-        cmd = ("cd ~/virtlet/examples && "
-               "cp cirros-vm.yaml {2} && "
-               "sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/ "
-               "\1VirtletVCPUCount: \"{0}\"/' {2} && "
-               "sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: "
-               "{1}Mi/' {2}".format(cpu, memory, target_yaml))
-        self.__underlay.check_call(cmd, node_name=virt_node['node_name'])
-
-    def get_domain_name(self, vm_name):
-        virt_node = self.virtlet_nodes[0]
-        cmd = ("~/virtlet/examples/virsh.sh list --name | "
-               "grep -i {0} ".format(vm_name))
-        result = self.__underlay.check_call(cmd,
-                                            node_name=virt_node['node_name'])
-        return result['stdout'].strip()
-
-    def get_vm_cpu_count(self, domain_name):
-        virt_node = self.virtlet_nodes[0]
-        cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
-               "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name))
-        result = self.__underlay.check_call(cmd,
-                                            node_name=virt_node['node_name'])
-        return int(result['stdout'].strip())
-
-    def get_vm_memory_count(self, domain_name):
-        virt_node = self.virtlet_nodes[0]
-        cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
-               "grep 'memory unit' | "
-               "grep -o '[[:digit:]]*'".format(domain_name))
-        result = self.__underlay.check_call(cmd,
-                                            node_name=virt_node['node_name'])
-        return int(result['stdout'].strip())
-
-    def get_domain_id(self, domain_name):
-        virt_node = self.virtlet_nodes[0]
-        cmd = ("virsh dumpxml {} | grep id=\' | "
-               "grep -o [[:digit:]]*".format(domain_name))
-        result = self.__underlay.check_call(cmd,
-                                            node_name=virt_node['node_name'])
-        return int(result['stdout'].strip())
-
-    def list_vm_volumes(self, domain_name):
-        virt_node = self.virtlet_nodes[0]
-        domain_id = self.get_domain_id(domain_name)
-        cmd = ("~/virtlet/examples/virsh.sh domblklist {} | "
-               "tail -n +3 | awk {{'print $2'}}".format(domain_id))
-        result = self.__underlay.check_call(cmd,
-                                            node_name=virt_node['node_name'])
-        return result['stdout'].strip()
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 7a2d82d..ed6d622 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -56,13 +56,6 @@
 _default_sl_prepare_tests_steps_path = pkg_resources.resource_filename(
     __name__, 'templates/{0}/sl.yaml'.format(
         settings.LAB_CONFIG_NAME))
-_default_virtlet_prepare_tests_steps_path = pkg_resources.resource_filename(
-    __name__, 'templates/{0}/virtlet.yaml'.format(
-        settings.LAB_CONFIG_NAME))
-_default_virtlet_ceph_prepare_tests_steps_path = \
-    pkg_resources.resource_filename(
-        __name__, 'templates/{0}/virtlet_ceph.yaml'.format(
-            settings.LAB_CONFIG_NAME))
 _default_k8s_steps = pkg_resources.resource_filename(
     __name__, 'templates/{0}/k8s.yaml'.format(
         settings.LAB_CONFIG_NAME))
@@ -94,7 +87,6 @@
            help="Node roles managed by underlay in the environment",
            default=[ext.UNDERLAY_NODE_ROLES.salt_master,
                     ext.UNDERLAY_NODE_ROLES.salt_minion,
-                    ext.UNDERLAY_NODE_ROLES.k8s_virtlet,
                     ext.UNDERLAY_NODE_ROLES.k8s_controller]),
     ct.Cfg('bootstrap_timeout', ct.Integer(),
            help="Timeout of waiting SSH for nodes with specified roles",
@@ -226,24 +218,6 @@
            help="Proemtheus protocol", default='http'),
 ]
 
-virtlet_deploy_opts = [
-    ct.Cfg('virtlet_steps_path', ct.String(),
-           help="Path to YAML with steps to deploy virtlet",
-           default=_default_virtlet_prepare_tests_steps_path),
-    ct.Cfg('virtlet_ceph_steps_path', ct.String(),
-           help="Path to YAML with steps to deploy one-node ceph cluster for "
-                "Virtlet Flexvolumes testing",
-           default=_default_virtlet_ceph_prepare_tests_steps_path)
-]
-
-virtlet_opts = [
-    ct.Cfg('virtlet_installed', ct.Boolean(),
-           help="", default=False),
-    ct.Cfg('ceph_installed', ct.Boolean(),
-           help="Determine, installed one-node ceph cluster or not",
-           default=False)
-]
-
 k8s_deploy_opts = [
     ct.Cfg('k8s_steps_path', ct.String(),
            help="Path to YAML with steps to deploy Kubernetes",
@@ -320,8 +294,6 @@
     ('opencontrail', opencontrail_opts),
     ('stack_light', sl_opts),
     ('sl_deploy', sl_deploy_opts),
-    ('virtlet_deploy', virtlet_deploy_opts),
-    ('virtlet', virtlet_opts),
     ('k8s_deploy', k8s_deploy_opts),
     ('k8s', k8s_opts),
 ]
@@ -395,12 +367,6 @@
                      title="SL deploy config and credentials",
                      help=""))
     config.register_opts(group='sl_deploy', opts=sl_deploy_opts)
-    config.register_group(cfg.OptGroup(name='virtlet_deploy',
-                                       title='Virtlet deploy config', help=""))
-    config.register_opts(group='virtlet_deploy', opts=virtlet_deploy_opts)
-    config.register_group(cfg.OptGroup(name='virtlet',
-                                       title='Virtlet config', help=""))
-    config.register_opts(group='virtlet', opts=virtlet_opts)
 
     config.register_group(cfg.OptGroup(name='k8s_deploy',
                                        title="K8s deploy configuration"))
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
index 38c9ec4..e9c17ec 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/k8s.yaml
@@ -20,7 +20,7 @@
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' state.sls kubernetes.pool;
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run 'calicoctl node status';
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 - description: Setup NAT for Calico
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
index 0122003..182453a 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/k8s.yaml
@@ -21,7 +21,7 @@
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' state.sls kubernetes.pool;
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run 'calicoctl node status';
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 - description: Setup NAT for Calico
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
index 658660c..d872f9a 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
@@ -183,7 +183,7 @@
                     - private
 
           - name: {{ HOSTNAME_CTL02 }}
-            role: k8s_virtlet
+            role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
               memory: !os_env SLAVE_NODE_MEMORY, 2048
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml
deleted file mode 100644
index 6e66fc4..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Clone virtlet project from git to the ctl01 node for start virtlet pod from yaml
-- description: Cloning virtlet project on ctl02
-  cmd:  git clone -b master https://github.com/Mirantis/virtlet.git
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Add 'virtlet' label for ctl02
-- description: Adding virtlet label for ctl02
-  cmd:  kubectl label node ctl02 extraRuntime=virtlet
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Add route for internal kube-services if necessary
-- description: Adding route for internal kube-services if necessary
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' cmd.run "ip r | grep 10.254 || ip ro add 10.254.0.0/16 dev ens4"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-
-# Install jq
-- description: Install jq
-  cmd: apt-get install jq -y
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Add extra parameter for kubelet service on virtlet node
-- description: Adding extra parameter for kubelet service on virtlet node
-  cmd:  sed -i.bak "s|^\"|--feature-gates=DynamicKubeletConfig=true \\\\\\n\"|" /etc/default/kubelet
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Restart kubelet service on virtlet node
-- description: Restart kubelet service on ctl02
-  cmd:  systemctl restart kubelet
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Create virtlet pod
-- description: Creating virtlet pod
-  cmd: |
-    kubectl convert -f virtlet/deploy/virtlet-ds.yaml --local -o json | jq \
-    '.items[0].spec.template.spec.containers[0].env|=.+[{"name":"VIRTLET_DISABLE_KVM","value":"y"}] | .items[0].spec.template.spec.volumes|=.+[{"name":"etcd","hostPath":{"path":"/var/lib/etcd"}},{"name":"kubernetes","hostPath":{"path":"/etc/kubernetes"}}] | .items[0].spec.template.spec.containers[0].volumeMounts|=.+[{"mountPath":"/etc/kubernetes","name":"kubernetes"},{"mountPath":"/var/lib/etcd","name":"etcd"}]' | \
-    kubectl create -f -
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Virtlet pod will likely stay in Init:0/1 state because there's a problem
-# with automatic kubelet restart after applying the configmap.
-# As of now, you'll need to restart kubelet after ~30-60 seconds.
-- description: Restarting kubelet service on virtlet node
-  cmd: |
-    COUNTER=0
-    while [[ $(kubectl get pods -n kube-system | awk '/virtlet/{print $3}') != 'Init:0/1' ]]; do
-      COUNTER=$((COUNTER+1))
-      sleep 5
-      if [[ $COUNTER -eq 36 ]]; then
-        echo "We havenot Init:0/1 state for virtlet pod. Aborting.";
-        exit 1
-      fi
-    done
-    sleep 60
-    systemctl restart kubelet
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Wait Active state for virtlet pod
-- description: Waiting 'Active' state for virtlet pod
-  cmd: |
-    COUNTER=0
-    while [[ $(kubectl get pods -n kube-system | awk '/virtlet/{print $3}') != 'Running' ]]; do
-      COUNTER=$((COUNTER+1))
-      sleep 5
-      if [[ $COUNTER -eq 36 ]]; then
-        echo "We havenot Active state for virtlet pod. Aborting.";
-        exit 1
-      fi
-    done
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet_ceph.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet_ceph.yaml
deleted file mode 100644
index 3f2468e..0000000
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/virtlet_ceph.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-{% from 'virtual-mcp11-k8s-calico/underlay.yaml' import HOSTNAME_CTL02 with context %}
-
-# Clone virtlet project from git to the ctl02 node to get ceph scripts
-- description: Cloning virtlet project on ctl02
-  cmd: |
-    if [[ ! -d virtlet ]]; then
-      git clone -b master https://github.com/Mirantis/virtlet.git;
-    else
-      echo "Virtlet project already present on node";
-    fi
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Make run_ceph.sh runnable
-- description: Set monitor ip for ceph
-  cmd: |
-    NODE_IP=$(ifconfig | grep -A 1 ens3 | tail -n +2 | awk '{print $2}' | sed 's/addr://g')
-    sed -i "0,/MON_IP=.*/s//MON_IP=${NODE_IP}/" virtlet/tests/e2e/run_ceph.sh
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
-
-# Run Ceph in docker
-- description: Start Ceph container
-  cmd: |
-    SCR_DIR=$(realpath virtlet/tests/e2e)
-    virtlet/tests/e2e/run_ceph.sh ${SCR_DIR}
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 9089870..9aa021e 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -23,8 +23,6 @@
 from tcp_tests.fixtures.oss_fixtures import *  # noqa
 from tcp_tests.fixtures.decapod_fixtures import *  # noqa
 from tcp_tests.fixtures.stacklight_fixtures import *  # noqa
-from tcp_tests.fixtures.virtlet_fixtures import *  # noqa
-from tcp_tests.fixtures.virtlet_ceph_fixtures import *  # noqa
 from tcp_tests.fixtures.k8s_fixtures import *  # noqa
 
 
@@ -62,9 +60,5 @@
     'sl_deployed',
     # k8s fixtures
     'k8s_actions',
-    'k8s_deployed',
-    'virtlet_actions',
-    'virtlet_deployed',
-    'virtlet_ceph_actions',
-    'virtlet_ceph_deployed'
+    'k8s_deployed'
 ])
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 5c8c9ea..99d6389 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -11,6 +11,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import pytest
 
 from tcp_tests import logger
 
@@ -20,8 +21,7 @@
 class TestVirtletActions(object):
     """Test class for testing Virtlet actions"""
 
-    def test_virtlet_create_delete_vm(self, underlay, virtlet_deployed,
-                                      show_step, virtlet_actions):
+    def test_virtlet_create_delete_vm(self, show_step, config, k8s_deployed):
         """Test for deploying an mcp environment with virtlet
 
         Scenario:
@@ -30,12 +30,21 @@
             3. Delete VM and wait to delete pod
 
         """
-        vm_name = virtlet_actions.run_vm()
-        virtlet_actions.wait_active_state(vm_name)
-        virtlet_actions.delete_vm(vm_name)
 
-    def test_vm_resource_quotas(self, underlay, virtlet_deployed, show_step,
-                                virtlet_actions):
+        if not config.k8s_deploy.kubernetes_virtlet_enabled:
+            pytest.skip("Test requires Virtlet addon enabled")
+
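+        # The VM helpers need the virtlet examples repo and jq on the node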
+        k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
+                               '~/virtlet')
+        k8s_deployed.install_jq()
+        show_step(1)
+        vm_name = k8s_deployed.run_vm()
+        show_step(2)
+        k8s_deployed.wait_active_state(vm_name)
+        show_step(3)
+        k8s_deployed.delete_vm(vm_name)
+
+    def test_vm_resource_quotas(self, show_step, config, k8s_deployed):
         """Test for deploying a VM with specific quotas
 
         Scenario:
@@ -46,28 +55,38 @@
 
         """
 
+        if not config.k8s_deploy.kubernetes_virtlet_enabled:
+            pytest.skip("Test requires Virtlet addon enabled")
+
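+        # The VM helpers need the virtlet examples repo and jq on the node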
+        k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
+                               '~/virtlet')
+        k8s_deployed.install_jq()
+        show_step(1)
         target_cpu = 2  # Cores
         target_memory = 256  # Size in MB
         target_memory_kb = target_memory * 1024
         target_yaml = 'virtlet/examples/cirros-vm-exp.yaml'
-        virtlet_actions.adjust_cirros_resources(cpu=target_cpu,
-                                                memory=target_memory,
-                                                target_yaml=target_yaml)
+        k8s_deployed.adjust_cirros_resources(
+            cpu=target_cpu, memory=target_memory, target_yaml=target_yaml)
-        vm_name = virtlet_actions.run_vm(target_yaml)
-        virtlet_actions.wait_active_state(vm_name)
-        domain_name = virtlet_actions.get_domain_name(vm_name)
-        cpu = virtlet_actions.get_vm_cpu_count(domain_name)
-        mem = virtlet_actions.get_vm_memory_count(domain_name)
+        show_step(2)
+        vm_name = k8s_deployed.run_vm(yaml_path=target_yaml)
+        k8s_deployed.wait_active_state(vm_name)
+        show_step(3)
+        domain_name = k8s_deployed.get_domain_name(vm_name)
+        cpu = k8s_deployed.get_vm_cpu_count(domain_name)
+        mem = k8s_deployed.get_vm_memory_count(domain_name)
         fail_msg = '{0} is not correct memory unit for VM. Correct is {1}'.\
             format(mem, target_memory_kb)
         assert target_memory_kb == mem, fail_msg
         fail_msg = '{0} is not correct cpu cores count for VM. ' \
                    'Correct is {1}'.format(cpu, target_cpu)
         assert target_cpu == cpu, fail_msg
-        virtlet_actions.delete_vm(target_yaml)
+        show_step(4)
+        k8s_deployed.delete_vm(vm_name)
 
-    def test_rbd_flexvolume_driver(self, underlay, virtlet_ceph_deployed,
-                                   show_step, virtlet_actions):
+    @pytest.mark.skip(reason="No configuration with ceph and k8s")
+    def test_rbd_flexvolume_driver(self, show_step, config, k8s_deployed):
         """Test for deploying a VM with Ceph RBD volume using flexvolumeDriver
 
         Scenario:
@@ -78,10 +97,17 @@
         """
         # From:
         # https://github.com/Mirantis/virtlet/blob/master/tests/e2e/run_ceph.sh
+        if not config.k8s_deploy.kubernetes_virtlet_enabled:
+            pytest.skip("Test requires Virtlet addon enabled")
+
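+        # The VM helpers need the virtlet examples repo and jq on the node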
+        k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
+                               '~/virtlet')
+        k8s_deployed.install_jq()
+
         target_yaml = "virtlet/tests/e2e/cirros-vm-rbd-volume.yaml"
-        vm_name = virtlet_actions.run_vm(target_yaml)
-        virtlet_actions.wait_active_state(vm_name)
-        domain_name = virtlet_actions.get_domain_name(vm_name)
-        vm_volumes_list = virtlet_actions.list_vm_volumes(domain_name)
+        vm_name = k8s_deployed.run_vm(yaml_path=target_yaml)
+        k8s_deployed.wait_active_state(vm_name)
+        domain_name = k8s_deployed.get_domain_name(vm_name)
+        vm_volumes_list = k8s_deployed.list_vm_volumes(domain_name)
         assert 'rbd' in vm_volumes_list
-        virtlet_actions.delete_vm(target_yaml)
+        k8s_deployed.delete_vm(vm_name)