Merge "Add cookied-mcp-mitaka-ovs template"
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index 7ec1a19..08176fc 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -179,14 +179,15 @@
             for version in chain_versions:
                 container_name = "k8s-conformance:{}".format(version)
                 tmp_report_dir = "/root/report_{}".format(version)
-                report_path = "/root/report_{}.xml".format(version)
+                report_path = "report_{}.xml".format(version)
                 conformance_log_path = "k8s_conformance_{}.log".format(version)
 
                 k8s_deployed.extract_file_to_node(
                     system='docker', container=container_name,
                     out_dir=tmp_report_dir, file_path='report'
                 )
-                k8s_deployed.combine_xunit(tmp_report_dir, report_path)
+                k8s_deployed.combine_xunit(tmp_report_dir,
+                                           '/root/{}'.format(report_path))
 
                 k8s_deployed.download_k8s_logs(
                     [report_path, conformance_log_path])
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
new file mode 100644
index 0000000..6f13cba
--- /dev/null
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -0,0 +1,39 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+from tcp_tests import settings
+from tcp_tests.managers.runtestmanager import RuntestManager
+
+
+@pytest.fixture(scope='function')
+def tempest_actions(underlay, salt_actions):
+    """
+    Initialize a RuntestManager configured to run Tempest tests
+    """
+    tempest_threads = settings.TEMPEST_THREADS
+    tempest_exclude_test_args = settings.TEMPEST_EXCLUDE_TEST_ARGS
+    tempest_pattern = settings.TEMPEST_PATTERN
+    cluster_name = settings.LAB_CONFIG_NAME
+    domain_name = "{}.local".format(cluster_name)
+    target = settings.TEMPEST_TARGET
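+    # All settings above come from environment variables, with defaults
+    # defined in tcp_tests/settings.py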
+    runtest = RuntestManager(
+        underlay, salt_actions,
+        cluster_name=cluster_name,
+        domain_name=domain_name,
+        tempest_threads=tempest_threads,
+        tempest_exclude_test_args=tempest_exclude_test_args,
+        tempest_pattern=tempest_pattern,
+        target=target)
+    return runtest
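+
+# A minimal usage sketch (hypothetical test name), assuming a deployed
+# environment provides the 'underlay' and 'salt_actions' fixtures:
+#
+#   def test_run_tempest(tempest_actions):
+#       tempest_actions.prepare_and_run_tempest()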
diff --git a/tcp_tests/managers/backup_restore_manager.py b/tcp_tests/managers/backup_restore_manager.py
index e3b8c23..365ff6f 100644
--- a/tcp_tests/managers/backup_restore_manager.py
+++ b/tcp_tests/managers/backup_restore_manager.py
@@ -13,33 +13,68 @@
 #    under the License.
 
 from tcp_tests import logger
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 
 
 LOG = logger.logger
 
 
-class BackupRestoreManager(object):
+class BackupRestoreManager(ExecuteCommandsMixin):
     """Helper manager for execution backup restore"""
 
-    backup_cmd = 'backupninja -n --run /etc/backup.d/200.backup.rsync'
-
-    def __init__(self, underlay, salt_api, backup_cmd=None):
+    def __init__(self, config, underlay, salt_api):
+        self.__config = config
         self.underlay = underlay
         self.__salt_api = salt_api
-        self.backup_cmd = backup_cmd or self.backup_cmd
+        super(BackupRestoreManager, self).__init__(config, underlay)
 
     @property
     def salt_api(self):
         return self.__salt_api
 
-    def create_backup(self, tgt, backup_cmd=backup_cmd):
-        return self.salt_api.enforce_state(tgt, 'cmd.run', backup_cmd)
+    def get_node_name(self, tgt):
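+        # Resolve a salt target substring (e.g. 'cfg01') to the first
+        # matching underlay node name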
+        res = [node_name for node_name in
+               self.underlay.node_names() if tgt in node_name]
+        assert len(res) > 0, 'Cannot find node name by tgt {}'.format(tgt)
+        return res[0]
 
-    def restore_salt_master(self, tgt):
-        return self.salt_api.local(tgt, 'salt.master.restore')
+    def create_backup(self, tgt, backup_cmd=None):
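+        # Default to the backupninja rsync handler when no command is given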
+        if not backup_cmd:
+            backup_cmd = 'backupninja -n --run /etc/backup.d/200.backup.rsync'
+        step = {'cmd': backup_cmd, 'node_name': self.get_node_name(tgt)}
+        self.execute_command(step, 'Running backup command')
 
-    def restore_salt_minion(self, tgt):
-        return self.salt_api.local(tgt, 'salt.minion.restore')
+    def check_file_exists(self, tgt, file_path=None):
+        if not file_path:
+            file_path = '/etc/backup.d/200.backup.rsync'
+        cmd = 'test -f {}'.format(file_path)
+        step = {'cmd': cmd, 'node_name': self.get_node_name(tgt)}
+        self.execute_command(step, 'Check file {} exists'.format(file_path))
+
+    def delete_dirs_files(self, tgt, file_path='/etc/pki/ca/salt_master_ca/'):
+        cmd = 'rm -rf {}'.format(file_path)
+        step = {'cmd': cmd, 'node_name': self.get_node_name(tgt)}
+        self.execute_command(step, 'Delete {}'.format(file_path))
+
+    def restore_salt(self, tgt):
+        cmd = 'salt-call state.sls salt.master.restore,salt.minion.restore'
+        step = {'cmd': cmd, 'node_name': self.get_node_name(tgt)}
+        self.execute_command(step, 'Restore salt master and minion')
+
+    def ping_minions(self, tgt):
+        cmd = 'salt "*" test.ping'
+        step = {'cmd': cmd, 'node_name': self.get_node_name(tgt)}
+        self.execute_command(step, 'Ping minions')
+
+    def verify_salt_master_restored(self, tgt):
+        cmd = "salt -t2 '*' saltutil.refresh_pillar"
+        step = {'cmd': cmd, 'node_name': self.get_node_name(tgt)}
+        self.execute_command(step,
+                             'Verify that the Salt Master node is restored')
+        step = {'cmd': 'ls -la /etc/pki/ca/salt_master_ca/',
+                'node_name': self.get_node_name(tgt)}
+        self.execute_command(step,
+                             'Check that PKI files exist')
 
     def create_mysql_backup_backupninja(self, tgt, ):
         rets = []
@@ -56,7 +91,7 @@
         # for every restored database in /root/mysql/flags.
         return self.salt_api.local(tgt, 'mysql.client')
 
-    def create_mysql_xtrabackup(self, tgt, backup_cmd=backup_cmd):
+    def create_mysql_xtrabackup(self, tgt, backup_cmd=None):
         # Should be run on mysql master node
         return self.salt_api.enforce_state(
             tgt, 'cmd.run', '/usr/local/bin/innobackupex-runner.sh')
@@ -76,7 +111,7 @@
         return self.salt_api.enforce_state(tgt, 'service.stop mysql')
 
     def disconnect_wresp_master(self, tgt='I@galera:master'):
-        # TODO fins the way updated wresp
+        # TODO: find a way to update wsrep
         return self.salt_api.enforce_state(
             tgt, 'cmd.run', 'wsrep_cluster_address=gcomm://')
 
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 991fb58..ba12678 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -114,6 +114,8 @@
                         failed += 1
                     if s.startswith("[CRITICAL]"):
                         failed += 1
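+                    # Treat 'Fatal' messages in the output as failures too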
+                    if 'Fatal' in s:
+                        failed += 1
 
                 if result.exit_code != 0:
                     time.sleep(retry_delay)
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 229a3ff..8957091 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -293,3 +293,13 @@
 
         return {'inspect': inspect,
                 'logs': logs}
+
+    def prepare_and_run_tempest(self, username='root'):
+        """
+        Prepare the environment, run Tempest tests and collect the results
+        """
+        tempest_timeout = settings.TEMPEST_TIMEOUT
+        self.prepare()
+        test_res = self.run_tempest(tempest_timeout)
+        self.fetch_arficats(username=username)
+        self.save_runtime_logs(**test_res)
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 169d089..50f3337 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -76,3 +76,8 @@
 TEMPEST_IMAGE_VERSION = os.environ.get('TEMPEST_IMAGE_VERSION', 'pike')
 TEMPEST_PATTERN = os.environ.get('TEMPEST_PATTERN', 'tempest')
 TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 5))
+TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
+TEMPEST_EXCLUDE_TEST_ARGS = os.environ.get(
+    'TEMPEST_EXCLUDE_TEST_ARGS',
+    '--blacklist-file mcp_pike_lvm_skip.list')
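+# Node name prefix (salt minion id) where the Tempest container is run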
+TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index c0ad0bd..52af7db 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -305,10 +305,10 @@
            default='sbPfel23ZigJF3Bm'),
     ct.Cfg('kubernetes_docker_package', ct.String(), default=''),
     ct.Cfg('kubernetes_hyperkube_image', ct.String(),
-           default='{}/mirantis/kubernetes/hyperkube-amd64:v1.8.13-11'.format(
+           default='{}/mirantis/kubernetes/hyperkube-amd64:v1.10.4-4'.format(
                settings.DOCKER_REGISTRY)),
     ct.Cfg('kubernetes_pause_image', ct.String(),
-           default='{}/mirantis/kubernetes/pause-amd64:v1.8.13-11'.format(
+           default='{}/mirantis/kubernetes/pause-amd64:v1.10.4-4'.format(
                settings.DOCKER_REGISTRY)),
     ct.Cfg('kubernetes_calico_image', ct.String(),
            default='{}/mirantis/projectcalico/calico/node:v2.6.9'.format(
@@ -335,7 +335,7 @@
     ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
            help="", default=False),
     ct.Cfg('kubernetes_virtlet_image', ct.String(),
-           help="", default='mirantis/virtlet:v0.8.0'),
+           help="", default='mirantis/virtlet:v1.1.0'),
     ct.Cfg('kubernetes_externaldns_enabled', ct.Boolean(),
            help="", default=False),
     ct.Cfg('kubernetes_externaldns_image', ct.String(),
diff --git a/tcp_tests/templates/SharedPipeline.groovy b/tcp_tests/templates/SharedPipeline.groovy
new file mode 100644
index 0000000..a34be31
--- /dev/null
+++ b/tcp_tests/templates/SharedPipeline.groovy
@@ -0,0 +1,114 @@
+common = new com.mirantis.mk.Common()
+
+def run_cmd(cmd, returnStdout=false) {
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
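+    // NOTE: assumes a pre-created fuel-devops virtualenv at this path
+    // on the Jenkins slave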
+    script = """\
+        set +x;
+        echo 'activate python virtualenv ${VENV_PATH}';
+        . ${VENV_PATH}/bin/activate;
+        bash -c 'set -ex; ${cmd.stripIndent()}'
+    """
+    return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+    return run_cmd(cmd, true)
+}
+
+def generate_cookied_model() {
+        // do not fail if the environment doesn't exist
+        def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+        def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+        def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+        def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
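+        // The four address pools above are read from the fuel-devops
+        // environment that was created earlier in a disabled state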
+        println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
+        println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
+        println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
+        println("IPV4_NET_EXTERNAL=" + IPV4_NET_EXTERNAL)
+
+        def parameters = [
+                string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
+                string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
+                string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'TCP_QA_REVIEW', value: "${TCP_QA_REFS}"),
+                string(name: 'IPV4_NET_ADMIN', value: IPV4_NET_ADMIN),
+                string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
+                string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
+                string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
+            ]
+        common.printMsg("Start building job 'swarm-cookied-model-generator' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'swarm-cookied-model-generator',
+            parameters: parameters
+}
+
+def generate_configdrive_iso() {
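+        // Discover the Salt Master IP from the running fuel-devops
+        // environment via the env_salt helper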
+        def SALT_MASTER_IP=run_cmd_stdout("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            echo \$SALT_MASTER_IP
+            """).trim().split().last()
+        println("SALT_MASTER_IP=" + SALT_MASTER_IP)
+        def parameters = [
+                string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+                string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
+                string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
+                booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
+                string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+                string(name: 'COMMON_SCRIPTS_COMMIT', value: "${MCP_VERSION}"),
+                string(name: 'NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+                string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
+                string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
+                booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
+                string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
+                string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
+                string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
+                string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
+            ]
+        common.printMsg("Start building job 'create-cfg-config-drive' with parameters:", "purple")
+        common.prettyPrint(parameters)
+        build job: 'create-cfg-config-drive',
+            parameters: parameters
+}
+
+def run_job_on_day01_node(stack_to_install) {
+    // stack_to_install="core,cicd"
+    def stack = "${stack_to_install}"
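+    // Run the 'deploy_openstack' job on the day01 (salt master) Jenkins
+    // for the given comma-separated list of stacks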
+    run_cmd("""\
+        export ENV_NAME=${ENV_NAME}
+        . ./tcp_tests/utils/env_salt
+        . ./tcp_tests/utils/env_jenkins_day01
+        JOB_PARAMETERS=\"{
+            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+        }\"
+        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+    """)
+}
+
+def run_job_on_cicd_nodes(stack_to_install) {
+    // stack_to_install="k8s,calico,stacklight"
+    def stack = "${stack_to_install}"
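+    // Same as run_job_on_day01_node, but uses the Jenkins running on the
+    // CICD cluster nodes, and lets IO settle afterwards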
+    run_cmd("""\
+        export ENV_NAME=${ENV_NAME}
+        . ./tcp_tests/utils/env_salt
+        . ./tcp_tests/utils/env_jenkins_cicd
+        JOB_PARAMETERS=\"{
+            \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+            \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+        }\"
+        JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+        python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+        sleep 60  # Wait for IO to calm down on cluster nodes
+    """)
+}
+
+
+// pretend to be a groovy class, DO NOT REMOVE
+return this
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index e47f36c..b8c6bd8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -179,23 +179,6 @@
   retry: {count: 10, delay: 30}
   skip_fail: false
 
-
-  # Upload cirros image
-
-- description: Upload cirros image on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 30}
-  skip_fail: false
-
-- description: Register image in glance
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Create net04_external
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
     '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -243,12 +226,4 @@
     '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR
-  cmd: |
-    rc=`salt "ctl01*" cmd.run 'cat /root/keystonercv3' | grep export`;
-    salt 'gtw01*' cmd.run "echo $rc > /root/keystonercv3";
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
\ No newline at end of file
+  skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 41c12a5..fba1474 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -70,7 +70,7 @@
   cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
   cluster_domain: cookied-bm-mcp-ovs-dpdk.local
   cluster_name: cookied-bm-mcp-ovs-dpdk
-  compute_bond_mode: active-backup
+  compute_bond_mode: balance-slb
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
   context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 2b693ab..64713fe 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -50,17 +50,16 @@
       - linux_system_codename_xenial
       - openstack_compute_sriov
       interfaces:
-        enp3s0f0:
-          role: single_mgm
-          deploy_address: 172.16.49.3
-        enp3s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.11.105
         enp5s0f0:
+          role: combined_vlan_ctl_mgm
+          single_address: 10.167.11.105
+        enp3s0f0:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.0"
           tenant_address: 10.167.12.105
-          role: single_ovs_dpdk_prv
-          dpdk_pci: "0000:05:00.0"
-
+        enp3s0f1:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.1"
 
     cmp02.cookied-bm-mcp-ovs-dpdk.local:
       reclass_storage_name: openstack_compute_node02
@@ -70,16 +69,16 @@
       - linux_system_codename_xenial
       - openstack_compute_sriov
       interfaces:
-        enp3s0f0:
-          role: single_mgm
-          deploy_address: 172.16.49.31
-        enp3s0f1:
-          role: single_vlan_ctl
-          single_address: 10.167.11.106
         enp5s0f0:
+          role: combined_vlan_ctl_mgm
+          single_address: 10.167.11.106
+        enp3s0f0:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.0"
           tenant_address: 10.167.12.106
-          role: single_ovs_dpdk_prv
-          dpdk_pci: "0000:05:00.0"
+        enp3s0f1:
+          role: bond_dpdk_prv_lacp
+          dpdk_pci: "0000:03:00.1"
 
     gtw01.cookied-bm-mcp-ovs-dpdk.local:
       reclass_storage_name: openstack_gateway_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index e8eb622..53e5be1 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -35,14 +35,6 @@
 
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-- description: "Workaround for PROD-14060"
-  cmd: |
-    set -e;
-    # Add tenant and single addresses for computes
-    salt-call reclass.cluster_meta_set tenant_address 10.167.12.105 /srv/salt/reclass/nodes/_generated/cmp01.cookied-bm-mcp-ovs-dpdk.local.yml
-    salt-call reclass.cluster_meta_set tenant_address 10.167.12.106 /srv/salt/reclass/nodes/_generated/cmp02.cookied-bm-mcp-ovs-dpdk.local.yml
-
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -64,15 +56,26 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-- description: Temporary WR for PROD-###
-  cmd: |
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'cmp*' cmd.run "service openvswitch-switch restart"
+- description: "Workaround for PROD-18834: Pre-install linux-headers package"
+  cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
-  skip_fail: true
+  skip_fail: false
+
+- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+  cmd: |
+    set -ex;
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
+    salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
+    salt 'cmp*' cmd.run "service openvswitch-switch stop";
+    salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
+    salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
+    salt 'cmp*' cmd.run "service openvswitch-switch start";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 ########################################
 # Spin up Control Plane VMs on KVM nodes
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
index c4307eb..ae10126 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
@@ -318,7 +318,7 @@
 
               root_volume_name: system     # see 'volumes' below
               cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp5s0f0  # see 'interfaces' below.
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -337,30 +337,18 @@
 
               interfaces:
                 - label: enp3s0f0
-                  l2_network_device: admin
                   mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
                 - label: enp3s0f1
                   mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
                 - label: enp5s0f0
+                  l2_network_device: admin
                   mac_address: !os_env ETH2_MAC_ADDRESS_CMP01
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
                 - label: enp5s0f1
                   mac_address: !os_env ETH3_MAC_ADDRESS_CMP01
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
-
               network_config:
-                enp3s0f0:
+                enp5s0f0:
                   networks:
                    - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp3s0f0
-                   - enp3s0f1
-
-
 
           - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
@@ -374,7 +362,7 @@
 
               root_volume_name: system     # see 'volumes' below
               cloud_init_volume_name: iso  # see 'volumes' below
-              cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp5s0f0  # see 'interfaces' below.
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -393,28 +381,18 @@
 
               interfaces:
                 - label: enp3s0f0
-                  l2_network_device: admin
                   mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
                 - label: enp3s0f1
                   mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
                 - label: enp5s0f0
+                  l2_network_device: admin
                   mac_address: !os_env ETH2_MAC_ADDRESS_CMP02
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
                 - label: enp5s0f1
                   mac_address: !os_env ETH3_MAC_ADDRESS_CMP02
-                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
               network_config:
-                enp3s0f0:
+                enp5s0f0:
                   networks:
                    - admin
-                bond0:
-                  networks:
-                   - control
-                  aggregation: active-backup
-                  parents:
-                   - enp3s0f0
-                   - enp3s0f1
-
 
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
new file mode 100644
index 0000000..e055d78
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -0,0 +1,197 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEowIBAAKCAQEAtyCfiXxwB6Dk6n7Y1t9u2XqMkLPvMArKwRUWGEwTzS7w0NzY
+    bCYdUfxo9m3tmhCO6hb0Yqzk6LEcOrARR7nHK7dS0JpRmZBeD3thdgXD8wNaG1PQ
+    ZzdNwGHP5cjfBXPHYXdP1k6HtLB1PymPDyEqhU0ZJrVGBK4+WSLNesSGOMiREQSx
+    kg/85aGdagLWHgAbgi0x1xx+Bu8LtuVkIcz8IMa3lanY7B8s5aIMxsOGTokJvdRL
+    QZJN0AfGRSANTIQZXfgkTO5wP85UsNisB8j7bliLl1wbxgnq/LTJZ+nQ1PA4dx8c
+    t0FKHYIR6zSd0LkDZaxJnZBgSrVnZ2JBbt7hTwIDAQABAoIBAHu/Ic+INTQSd142
+    hVT9+ywe8em+jX0LbeN32kxk7GSUucqJ0f2S6/FA/bS4p/yZ/9kT1eTwLGdJd2f5
+    HlQ3p+1UnjO0dDuvIMCZgUx9rOIEe9lHk+aLqpC8B/6g9IP8rtigBWUt/+oL687Y
+    yIFSyib16G8Nw9jZ3evh5rR1JLYtPHvAJiodsT3iY/+wZkuo4dAa4/QlKPT7QXaU
+    G5/AA/8zdsVOJl5JOHjP2pFBMzxttkWbkuYpEQe7LRw5MOlfFpMEYYZ+NJGVwDNe
+    0WTpiOIDo78xaq5TsOS23fJCEKodtFrITXvSv0c1tNoL/WcslwmwcV3mKyySFffG
+    Sj7G5RECgYEA7NszuBZBY0Zn7qLkczIdTq15lZ0KFJb2sHIRQbzeeCYn6Q8LJsCb
+    ELhgevun4BxrE2O+R8H4HL+g002vqzL9Vn9oOqFTn3GZMaHojiMSmjCBNl05Mftw
+    EM69l6WV6H8E+D90GMGGoxRJlqHdOuNcQ9bdQpkF4vTNBfzx7VU+5csCgYEAxe23
+    h191srNg7wjafMuK22RtM739knqX+sqeFaGqM6f73+vJaqNilvfgSRQYZc1MOetp
+    Ty4A4g/Jx/NkDWkaLbewFaHw7dNK62Vr6Ovl67Z9sEo8A2ySS7VWVuAqzVbRjyGp
+    yddGiW2Q+ITdfPfHbCFobVUgFeSinfZxkMFw4g0CgYBG8rZASzJU+W8CdXq24ukS
+    ezYzUbIGTt4gJlry9Q8ysEM+NYpilkkcrg4AaMd1gy2zxinmNr0KZ4BWKywWvRRT
+    x6BCB7cTyKRZ0KTnhqv40dSyoyQRy75a2oLCHRCVbw7fCarOC5I34UjVvTCWhipK
+    C9+FJm8z954+T/Fr5SANFwKBgEJBSvhD2jBRn5ckjY7My9SZD30Mkj9gTlOjU7vF
+    /CWCi+vvD+NkgfIrU6bi1S/uwx94UC4zJhSGWHNYZBuhHSREour65J2X5zJZJwA3
+    RyXaVsSWdPRoeahiMV6vd2R5NXkGOcHZEEGcrbSjNUlJ4DWwETbYEf+CI3VhM67T
+    MihZAoGBALxcTSivHJZDle81lsu1dcgmzZkUfQAcUSYDWhg+Bqg3A8FVKMpEzrbd
+    weGRM8S8oAz1PN0T/LRcpJq3TFZpy+iXx59jl5XenmoKwPr+u5XFrEHTWqNS2NcL
+    MwS8VTJhWYNVdrNIRWClRVUv87hZMha40JHiPK1KA4em1G+H29x3
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3IJ+JfHAHoOTqftjW327ZeoyQs+8wCsrBFRYYTBPNLvDQ3NhsJh1R/Gj2be2aEI7qFvRirOTosRw6sBFHuccrt1LQmlGZkF4Pe2F2BcPzA1obU9BnN03AYc/lyN8Fc8dhd0/WToe0sHU/KY8PISqFTRkmtUYErj5ZIs16xIY4yJERBLGSD/zloZ1qAtYeABuCLTHXHH4G7wu25WQhzPwgxreVqdjsHyzlogzGw4ZOiQm91EtBkk3QB8ZFIA1MhBld+CRM7nA/zlSw2KwHyPtuWIuXXBvGCer8tMln6dDU8Dh3Hxy3QUodghHrNJ3QuQNlrEmdkGBKtWdnYkFu3uFP
+  bmk_enabled: 'False'
+  calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
+  calico_enable_nat: 'True'
+  calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
+  calico_netmask: '16'
+  calico_network: 192.168.0.0
+  calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+    3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+    AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+    xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+    B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+    q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+    s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+    V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+    9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+    pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+    MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+    7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+    udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+    R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+    XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+    Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+    KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+    6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+    ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+    ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+    Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+    r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+    mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+    qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+    9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+  cluster_domain: cookied-mcp-k8s-calico-sl.local
+  cluster_name: cookied-mcp-k8s-calico-sl
+  context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: ddmitriev@mirantis.com
+  etcd_ssl: 'True'
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.241
+  infra_kvm01_deploy_address: 10.167.5.91
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.242
+  infra_kvm02_deploy_address: 10.167.5.92
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.243
+  infra_kvm03_deploy_address: 10.167.5.93
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kqueen_custom_mail_enabled: 'False'
+  kqueen_enabled: 'False'
+  kubernetes_compute_node01_address: 10.167.4.101
+  kubernetes_compute_node01_deploy_address: 10.167.5.101
+  kubernetes_compute_node01_hostname: cmp01
+  kubernetes_compute_node02_address: 10.167.4.102
+  kubernetes_compute_node02_deploy_address: 10.167.5.102
+  kubernetes_compute_node02_hostname: cmp02
+  kubernetes_control_address: 10.167.4.10
+  kubernetes_control_node01_address: 10.167.4.11
+  kubernetes_control_node01_deploy_address: 10.167.5.11
+  kubernetes_control_node01_hostname: ctl01
+  kubernetes_control_node02_address: 10.167.4.12
+  kubernetes_control_node02_deploy_address: 10.167.5.12
+  kubernetes_control_node02_hostname: ctl02
+  kubernetes_control_node03_address: 10.167.4.13
+  kubernetes_control_node03_deploy_address: 10.167.5.13
+  kubernetes_control_node03_hostname: ctl03
+  kubernetes_enabled: 'True'
+  kubernetes_externaldns_enabled: 'False'
+  kubernetes_keepalived_vip_interface: ens4
+  kubernetes_network_calico_enabled: 'True'
+  kubernetes_virtlet_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_deploy_range_end: 10.167.5.254
+  maas_deploy_range_start: 10.167.5.1
+  maas_deploy_vlan: '0'
+  maas_fabric_name: fabric-0
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: cicd
+  openstack_enabled: 'False'
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: kubernetes_enabled
+  public_host: ${_param:infra_config_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
+  salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.4.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.4.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.4.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.4.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.167.4.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.4.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.4.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.4.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.4.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.4.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.4.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.4.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
+  vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
new file mode 100644
index 0000000..71b3e8d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
@@ -0,0 +1,140 @@
+common = new com.mirantis.mk.Common()
+
+def run_cmd(cmd, returnStdout=false) {
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}';. ${VENV_PATH}/bin/activate; bash -c 'set -ex;set -ex;${cmd.stripIndent()}'"
+    return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+    return run_cmd(cmd, true)
+}
+
+node ("${NODE_NAME}") {
+  try {
+
+    stage("Clean the environment") {
+        println "Clean the working directory ${env.WORKSPACE}"
+        deleteDir()
+        // do not fail if the environment doesn't exist
+        println "Remove environment ${ENV_NAME}"
+        run_cmd("""\
+            dos.py erase ${ENV_NAME} || true
+        """)
+        println "Remove config drive ISO"
+        run_cmd("""\
+            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+        """)
+    }
+
+    stage("Clone tcp-qa project and install requirements") {
+        run_cmd("""\
+        git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
+        #cd tcp-qa
+        if [ -n "$TCP_QA_REFS" ]; then
+            set -e
+            git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+        fi
+        pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
+        """)
+    }
+
+    // load shared methods from the cloned tcp-qa repository.
+    // DO NOT MOVE this code before cloning the repo
+    def rootDir = pwd()
+    def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
+
+    stage("Create an environment ${ENV_NAME} in disabled state") {
+        // do not fail if the environment doesn't exist
+        run_cmd("""\
+        python ./tcp_tests/utils/create_devops_env.py
+        """)
+    }
+
+    stage("Generate the model") {
+        shared.generate_cookied_model()
+    }
+
+    stage("Generate config drive ISO") {
+        shared.generate_configdrive_iso()
+    }
+
+    stage("Upload generated config drive ISO into volume on cfg01 node") {
+        run_cmd("""\
+        virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+        virsh pool-refresh --pool default
+        """)
+    }
+
+    stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+        run_cmd("""\
+        export MANAGER=devops
+        export SHUTDOWN_ENV_ON_TEARDOWN=false
+        export BOOTSTRAP_TIMEOUT=900
+        export PYTHONIOENCODING=UTF-8
+        export REPOSITORY_SUITE=${MCP_VERSION}
+        #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
+        export TEST_GROUP=test_install_local_salt
+        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
+        sleep 60  # wait for jenkins to start and IO to calm down
+
+        """)
+    }
+
+    // Install core and cicd
+    stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
+        shared.run_job_on_day01_node("core")
+    }
+
+    stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
+        shared.run_job_on_day01_node("cicd")
+    }
+
+    // Install the cluster
+    for (stack in "${STACK_INSTALL}".split(",")) {
+        stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+            shared.run_job_on_cicd_nodes(stack)
+        }
+    }
+
+    stage("Run tests") {
+        run_cmd("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            . ./tcp_tests/utils/env_k8s
+
+            # Initialize variables used in tcp-qa tests
+            export CURRENT_SNAPSHOT=sl_deployed  # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separately
+
+            export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
+            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+            export salt_master_port=6969
+            export SALT_USER=\$SALTAPI_USER
+            export SALT_PASSWORD=\$SALTAPI_PASS
+            export COMMON_SERVICES_INSTALLED=true  # skip common_services_deployed fixture
+            export K8S_INSTALLED=true              # skip k8s_deployed fixture
+            export sl_installed=true              # skip sl_deployed fixture
+
+            py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico_sl
+
+            #dos.py suspend ${ENV_NAME}
+            #dos.py snapshot ${ENV_NAME} test_completed
+            #dos.py resume ${ENV_NAME}
+            #dos.py time-sync ${ENV_NAME}
+            """)
+    }
+
+  } catch (e) {
+      common.printMsg("Job failed", "red")
+      throw e
+  } finally {
+    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+    // and report appropriate data to TestRail
+    run_cmd("""\
+        dos.py destroy ${ENV_NAME}
+    """)
+  }
+}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
new file mode 100644
index 0000000..248b42d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -0,0 +1,235 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: kubernetes_control_node01
+      roles:
+      - kubernetes_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: kubernetes_control_node02
+      roles:
+      - kubernetes_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: kubernetes_control_node03
+      roles:
+      - kubernetes_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmp01:
+      reclass_storage_name: kubernetes_compute_node01
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+          single_address: ${_param:kubernetes_compute_node01_address}
+
+    cmp02:
+      reclass_storage_name: kubernetes_compute_node02
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+          single_address: ${_param:kubernetes_compute_node02_address}
+
+    mon01:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr01:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr02:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mtr03:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log01:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log02:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    log03:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
new file mode 100644
index 0000000..70f3cd3
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
@@ -0,0 +1,16 @@
+{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Workaround to configure ntp and rsyslog on the salt master node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls ntp,rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..77c18d1
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo touch /is_cloud_init_started
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - echo "******** MOUNT CONFIG DRIVE"
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   #- sudo ifdown ens3
+   #- sudo ip r d default || true  # remove existing default route to get it from dhcp
+   #- sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run user data script from config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+   - rm -f /etc/network/interfaces
+   #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+   #- cp /root/config-drive/user-data /root/user-data
+   #- sed -i '/^reboot$/d' /root/user-data
+   #- set -x; cd /root && /bin/bash -xe ./user-data
+   - set -x; cd /root/config-drive && /bin/bash -xe ./user-data
+
+   #- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+   # Enable root access (after reboot)
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   #- service sshd stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - touch /is_cloud_init_finished
+   #- reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   #- path: /etc/network/interfaces
+   - path: /root/interfaces
+     content: |
+          auto lo
+          iface lo inet loopback
+
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 60
+            ServerAliveCountMax 0
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
new file mode 100644
index 0000000..b1b6430
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   #- fallocate -l 16G /swapfile
+   #- chmod 600 /swapfile
+   #- mkswap /swapfile
+   #- swapon /swapfile
+   #- echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
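
The file above is the no-swap twin of underlay--user-data1604-swp.yaml: the only intended difference is whether the "Create swap" commands are commented out. When editing either template, a quick check such as the sketch below (paths assumed relative to the repository root) confirms nothing else has drifted:

    import difflib

    base = "tcp_tests/templates/cookied-cicd-k8s-calico-sl/"
    with open(base + "underlay--user-data1604.yaml") as plain, \
            open(base + "underlay--user-data1604-swp.yaml") as swp:
        # Only the swap block should appear in the output.
        for line in difflib.unified_diff(plain.readlines(), swp.readlines(),
                                         fromfile="user-data1604",
                                         tofile="user-data1604-swp"):
            print(line, end="")
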
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
new file mode 100644
index 0000000..10c716e
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -0,0 +1,778 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+
+{% import 'cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+            default_{{ HOSTNAME_CTL }}: +10
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_LOG }}: +60
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MON }}: +70
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_MTR }}: +85
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_PRX01 }}: +222
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
+            storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
+            use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fall back to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
+           # or
+           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+                                                            # it will be uploaded after config drive generation
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: k8s_controller
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_LOG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MTR03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
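
In the address_pools section at the top of this underlay.yaml, 'net: 10.70.0.0/16:24' asks fuel-devops to carve a /24 out of 10.70.0.0/16, and the '+N' / '-N' offsets are resolved relative to that subnet: positive values count up from the network address, negative values count back from the broadcast address. An illustrative sketch of the arithmetic (not the actual fuel-devops parser, which also tracks which subnets are already taken):

    import ipaddress

    # Take the first /24 from the pool; fuel-devops may pick another free one.
    pool = next(ipaddress.ip_network("10.70.0.0/16").subnets(new_prefix=24))

    def resolve(offset):
        """Resolve a '+N' / '-N' offset against the allocated subnet."""
        base = pool.network_address if offset >= 0 else pool.broadcast_address
        return base + offset

    print(resolve(1))     # gateway               -> 10.70.0.1
    print(resolve(15))    # default_cfg01         -> 10.70.0.15
    print(resolve(-10))   # end of the dhcp range -> 10.70.0.245
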
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
new file mode 100644
index 0000000..693a589
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -0,0 +1,166 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEA3ufjR+Eh/CJp84JZPKosMNL7ydXidfe9qdAnQIGGOsS/TBnc
+    RyY+hy4Mg5Or//VBpY53frcrEEnm1CzEeIfGALiQtsMWOwEEiEHIDzbxN7xyYK1u
+    9fpcRHZy16VJx2gQOxbeAIyct9jrQeQuSbN0k7Tr+bfWLVPoGL4SZiBC+dxRjrmT
+    2pMIdqa8kAcd5cakuyENXT31ZI/ffVscl7TJBat7tUbgD+48GK4LqMBL/eC6v2bQ
+    ohmo6ZqWLh9uT+/l/rRdIiBhI+kmPpUDRLnjkd/gH4GQh/r/PPlxu11JNwPgY4Kx
+    IAk5hq9uPRn3pqBKPg+WHgWmQvpHqVVDDnf7XQIDAQABAoIBAQDCEtuL5bQVNlFR
+    NphDfVZkXA3lOVempkB37Ud/nkYkPNDhjVKAkAe44pr6pEQI4px5bIUVypyv3egf
+    q6qT1oLKdedpeImObeBoUf3BYXC7ulNLYTVO7OAQq6BpqPuHpk8bY1l+2O5KE48h
+    G25BtQE26TrbfPf5FyjpAfQ6/rPRniURu1ZMFK1Do18wf7lGxa6RN4jPbfGfEvlf
+    q6GWGtsOB2kLXnUDjuDeUrgS8HgxBSMH+lwxrkdX0Qb4VN+cBOp8TC30rHXdLAmn
+    mWUDQhaao+zZpZsAAxGbM2BAFUQAicd/OS6FJn6xkH0KN6+Rp1Iiy3Sa97wMsMti
+    aHAyVwkBAoGBAPf1gcRmKTHaUVb7XgS3acytBm7LM2GCQfgPDvQIp8rf8bmnAko6
+    MzxPdq2WXzWY75JiNxQSsmemcJyBRJm1sscp0txAnZS5SSycWlHy0zP5LJDtU7jW
+    Z7dXtRYzdDL0sH6KVQCOmfDmGowLs3eO0F7MyCbDIwdkIQ4LCs+TWcYxAoGBAOYi
+    ZUR7vXFbmXQQUEHxeft3sF6v8epFhnMuvwHgmHIzSCDDKoIMoLlqDOV8KynggyqQ
+    /YpvzqfCuP4aDpriU1glTZB0R9WdkKwk+GW13U9LfDw86u/XfGkMtT2QP6PmIQaI
+    1MJlX2b0rihnUy6zqRFH+mU4+9I66Gg1s8O9s4DtAoGAbiUol82pzvNj3neatA2l
+    eb4CdYTeNhpeo4pM4ipWHtCL2CRP6BkiWVATL9j0QiLFiQkH3mrPxSsyKtNhXcZQ
+    vBfgCubJGR+VWbO6i1yKZTPykA5cemcDe3YCgvIoU9pN7GgWikDOMSyF7l/kQN+3
+    v+THpDBahxX7ePl+u+aAooECgYBOoigJ+2HirtLDJqPVtGXit6XK5MF7M+BZ0Pow
+    8QYF12Ho1+bZYuk0EXlwnDm/aFhJHhuTxtpM1isRn+Onpnel4bEcD69P3TPGric1
+    0atZ4cgEaSg5ZV68Ijx3Wad1IDfenLhd5/duHWK4qX1xsq+tGPQEzDC3R6uLl/Xh
+    hxsjjQKBgQC53W+e4N6pOK8oCA2tlDw8Nu733FRrxNP4emdTzYyKJbNxBP2LI/ts
+    K/fgcD9aWeo0zt3Y/0UzzijqrWMCG2NdAlHwEShcXUt1525O4H64mH50MeylTGcj
+    t6ZFlhArriIXlejxuU9Jxe/HEKMh/1iBdlnD0rCOfhJaY/HO9dWtRw==
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDe5+NH4SH8Imnzglk8qiww0vvJ1eJ1972p0CdAgYY6xL9MGdxHJj6HLgyDk6v/9UGljnd+tysQSebULMR4h8YAuJC2wxY7AQSIQcgPNvE3vHJgrW71+lxEdnLXpUnHaBA7Ft4AjJy32OtB5C5Js3STtOv5t9YtU+gYvhJmIEL53FGOuZPakwh2pryQBx3lxqS7IQ1dPfVkj999WxyXtMkFq3u1RuAP7jwYrguowEv94Lq/ZtCiGajpmpYuH25P7+X+tF0iIGEj6SY+lQNEueOR3+AfgZCH+v88+XG7XUk3A+BjgrEgCTmGr249GfemoEo+D5YeBaZC+kepVUMOd/td
+  bmk_enabled: 'False'
+  calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
+  calico_enable_nat: 'True'
+  calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
+  calico_netmask: '16'
+  calico_network: 192.168.0.0
+  calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.4.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.4.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.4.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.4.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEA0vs3yV4GVrnMDT1i1rc3DKVCU4kQ6D7OCDhJ10kiA4VZ/9Td
+    CIKGxL3+kzNHBqEB3HcjsGJ7zTud3S97Zd21HzTeF0iia7kQXZs3zw3GnU4IWCzd
+    zeyJenD1twrDILc1Q80SPIw8klbgK3T8Wcs0sACyfAORTKnM4Alrq3PSyn2G/L7s
+    wL+4OYWw8+V1Bo6f/1zsVdJWFelZ1HNd1fuGP9Egx+PSHznmBQniewM2AktZVHkd
+    e2j+sIMnimRr55ic6tEiPKb1sp+mdqfxUbkxU1q3r80Bnq5lbYNexJZLZFbY8pBF
+    w5qtVoXsrUra0xT66yDs9z9kgTXZt8MppSPEywIDAQABAoIBAD60d3IbxdqEwga1
+    Vejm7y+M9leJh0LfV7DNufSIQdm2CnektkTPNmrG5SCuvs9TjxANQMgtnQn0TAjv
+    EcfGywwJVHCrH8rQZ/LKLR4WxA8AoC5Et296e6muZPkP88qHaQdLrb3dGCuOMnX4
+    AQBXCsO8kU+WZ9rXJJL5ecytUdroRZnmChUN9sZFoCrrFmCJyKzSdmbtyHwvmc6t
+    K9YHx5HcQppdnbaUT/PcRNjNgOdQsGrYG/4tGpZE9GYqBRrtEZAFKnRDDlaq9mvc
+    FJ451YhomyUNtM7hor+VpSi9WtgiXh9fWUfJUIH4Ven9y29I7SNAxftfs0Xxdz2O
+    aEYnhPECgYEA9YKsXtslfSsjyG3Z6NTy9peAjBlUSA7jkbsfOIcN1qLAxSlOMIVG
+    P9knWa5rgp1nOssHxpbCJaiZv1r2C0sHZ+B/ZCNV1lCdz+pN+Y0AQHz2/VrQEtkR
+    cvClpWXjACT0igJ5mrmehzV166CI3t7+nCrkRd9aYTuVpNP4o9aEuGcCgYEA2/7f
+    2UVcq8SKJYAJD0mNc4mfnNwmyvIfwJqQfechPPwXG3kmd2jgFdUV3JyCwS771TRz
+    9QAHBLoBgKSVKLBORsTjYy7TSIYvnTUgrymMwc75nevVIy258hPSYpF6cVEIRnNr
+    sek5heYQxJZr9RraxihvoaZytmPRm918sR6B4f0CgYEAlizQc1VpoR76TGelm55m
+    4B/cKdZ0j39MBKCJgHJcLKZxdCjIAzYCupuCToE6kjLmKjh3ESq2p4JySXLCfjXu
+    2cOhKQfUQbweTEfuWm+9b7UBAAjErkLJQZ2iNYIVUMlKLAFHkTVpmxtAflk8X9fX
+    tn8mEveEuWVRK/ndZZqapJECgYA3IQSpZsdVR/gyc4ZRrWXkCR3VahnSi6BHXLRO
+    yKe8p5OGz/JCxCY7cl17HkFp9cMn53ATekFH/vC3cwbp3lyPQXGV/jr2FqJB6/lX
+    y7q5KovE9j9ABIpvTmZPSxN66AqB1RSszbwbgM685NEC6Arg02s9//8JE7SIMZW4
+    sONtZQKBgQDZoa9yAM9YRRFUpgiyKdmp4Yzq3xc17xhlrv3HgAKXzCwlRXm+TxKP
+    kmwFI2nn3sPN9jSegCGyOtfzoh4Q7DtBowrjLPUisWk1gLZH4HpCP7mkndR9ODI3
+    So+yjY6Y787NTuAtS97T1AfOPQR8VOm9vuGT1IRWNno3ckThokPcCg==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDS+zfJXgZWucwNPWLWtzcMpUJTiRDoPs4IOEnXSSIDhVn/1N0IgobEvf6TM0cGoQHcdyOwYnvNO53dL3tl3bUfNN4XSKJruRBdmzfPDcadTghYLN3N7Il6cPW3CsMgtzVDzRI8jDySVuArdPxZyzSwALJ8A5FMqczgCWurc9LKfYb8vuzAv7g5hbDz5XUGjp//XOxV0lYV6VnUc13V+4Y/0SDH49IfOeYFCeJ7AzYCS1lUeR17aP6wgyeKZGvnmJzq0SI8pvWyn6Z2p/FRuTFTWrevzQGermVtg17ElktkVtjykEXDmq1WheytStrTFPrrIOz3P2SBNdm3wymlI8TL
+  cluster_domain: cookied-mcp-k8s-calico.local
+  cluster_name: cookied-mcp-k8s-calico
+  context_seed: 6RD8HFGk0xksGje6RcIiGRkHIIkdvHUDgBuUsCTYDv5Tw4DmVMbRlRVnatzGHYJd
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.4.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.167.5.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.167.5.0/24
+  deployment_type: physical
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: ddmitriev@mirantis.com
+  etcd_ssl: 'True'
+  #hyperkube_image: docker-prod-local.artifactory.mirantis.com/mirantis/kubernetes/hyperkube-amd64:v1.8.5-4
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.4.241
+  infra_kvm01_deploy_address: 10.167.5.91
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.4.242
+  infra_kvm02_deploy_address: 10.167.5.92
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.4.243
+  infra_kvm03_deploy_address: 10.167.5.93
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.4.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kqueen_custom_mail_enabled: 'False'
+  kqueen_enabled: 'False'
+  kubernetes_compute_node01_address: 10.167.4.101
+  kubernetes_compute_node01_deploy_address: 10.167.5.101
+  kubernetes_compute_node01_hostname: cmp01
+  kubernetes_compute_node02_address: 10.167.4.102
+  kubernetes_compute_node02_deploy_address: 10.167.5.102
+  kubernetes_compute_node02_hostname: cmp02
+  kubernetes_control_address: 10.167.4.10
+  kubernetes_control_node01_address: 10.167.4.11
+  kubernetes_control_node01_deploy_address: 10.167.5.11
+  kubernetes_control_node01_hostname: ctl01
+  kubernetes_control_node02_address: 10.167.4.12
+  kubernetes_control_node02_deploy_address: 10.167.5.12
+  kubernetes_control_node02_hostname: ctl02
+  kubernetes_control_node03_address: 10.167.4.13
+  kubernetes_control_node03_deploy_address: 10.167.5.13
+  kubernetes_control_node03_hostname: ctl03
+  kubernetes_enabled: 'True'
+  kubernetes_externaldns_enabled: 'False'
+  kubernetes_keepalived_vip_interface: ens4
+  kubernetes_network_calico_enabled: 'True'
+  kubernetes_virtlet_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 10.167.5.15
+  maas_hostname: cfg01
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ${_param:cluster_name}.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openssh_groups: cicd
+  openstack_enabled: 'False'
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  platform: kubernetes_enabled
+  public_host: ${_param:infra_config_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: GItjqF2mBE9JpA6WjnqD4pKNJMWJK72g
+  salt_api_password_hash: $6$XxwWWczf$tFbAgdW1PeVJWTn0Jw/xfwJlss/RgOf9fGWqx2XE7vZ5O/ZGR1AuIgl/HH7Qm3.ZxvutaWmfWszxWcPFZepzv.
+  salt_master_address: 10.167.4.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.167.5.15
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.167.6.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.167.6.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
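
The salt_api_password_hash above is a standard SHA-512 crypt string: '$6$', then the salt ('XxwWWczf'), then the digest. Assuming the plaintext is the salt_api_password value, the hash can be regenerated with the stdlib crypt module (POSIX-only; deprecated since Python 3.11 and removed in 3.13):

    import crypt

    password = "GItjqF2mBE9JpA6WjnqD4pKNJMWJK72g"  # salt_api_password above

    # Reusing the salt from the context should reproduce the stored hash;
    # a freshly generated salt yields a new, equally valid hash.
    print(crypt.crypt(password, "$6$XxwWWczf$"))
    print(crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512)))
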
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
new file mode 100644
index 0000000..18a0c2c
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
@@ -0,0 +1,135 @@
+common = new com.mirantis.mk.Common()
+
+def run_cmd(cmd, returnStdout=false) {
+    common.printMsg("Run shell command:\n" + cmd, "blue")
+    def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}';. ${VENV_PATH}/bin/activate; bash -c 'set -ex;${cmd.stripIndent()}'"
+    return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+    return run_cmd(cmd, true)
+}
+
+node ("${NODE_NAME}") {
+  try {
+
+    stage("Clean the environment") {
+        println "Clean the working directory ${env.WORKSPACE}"
+        deleteDir()
+        // do not fail if the environment doesn't exist
+        println "Remove environment ${ENV_NAME}"
+        run_cmd("""\
+            dos.py erase ${ENV_NAME} || true
+        """)
+        println "Remove config drive ISO"
+        run_cmd("""\
+            rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+        """)
+    }
+
+    stage("Clone tcp-qa project and install requirements") {
+        run_cmd("""\
+        git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
+        #cd tcp-qa
+        if [ -n "$TCP_QA_REFS" ]; then
+            set -e
+            git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+        fi
+        pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
+        """)
+    }
+
+    // load shared methods from the cloned tcp-qa repository.
+    // DO NOT MOVE this code before cloning the repo
+    def rootDir = pwd()
+    def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
+
+    stage("Create an environment ${ENV_NAME} in disabled state") {
+        // create the environment with nodes powered off; they are started later
+        run_cmd("""\
+        python ./tcp_tests/utils/create_devops_env.py
+        """)
+    }
+
+    stage("Generate the model") {
+        shared.generate_cookied_model()
+    }
+
+    stage("Generate config drive ISO") {
+        shared.generate_configdrive_iso()
+    }
+
+    stage("Upload generated config drive ISO into volume on cfg01 node") {
+        run_cmd("""\
+        virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+        virsh pool-refresh --pool default
+        """)
+    }
+
+    stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+        run_cmd("""\
+        export MANAGER=devops
+        export SHUTDOWN_ENV_ON_TEARDOWN=false
+        export BOOTSTRAP_TIMEOUT=900
+        export PYTHONIOENCODING=UTF-8
+        export REPOSITORY_SUITE=${MCP_VERSION}
+        #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
+        export TEST_GROUP=test_install_local_salt
+        py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
+        sleep 60  # wait for jenkins to start and for IO to calm down
+
+        """)
+    }
+
+    // Install core and cicd
+    stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
+        shared.run_job_on_day01_node("core")
+    }
+
+    stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
+        shared.run_job_on_day01_node("cicd")
+    }
+
+    // Install the cluster
+    for (stack in "${STACK_INSTALL}".split(",")) {
+        stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+            shared.run_job_on_cicd_nodes(stack)
+        }
+    }
+
+    stage("Run tests") {
+        run_cmd("""\
+            export ENV_NAME=${ENV_NAME}
+            . ./tcp_tests/utils/env_salt
+            . ./tcp_tests/utils/env_k8s
+
+            # Initialize variables used in tcp-qa tests
+            export CURRENT_SNAPSHOT=k8s_deployed  # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separately
+
+            export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
+            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+            export salt_master_port=6969
+            export SALT_USER=\$SALTAPI_USER
+            export SALT_PASSWORD=\$SALTAPI_PASS
+            export COMMON_SERVICES_INSTALLED=true  # skip common_services_deployed fixture
+            export K8S_INSTALLED=true              # skip k8s_deployed fixture
+
+            py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico
+            """)
+    }
+
+  } catch (e) {
+      common.printMsg("Job failed", "red")
+      throw e
+  } finally {
+    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+    // and report appropriate data to TestRail
+    run_cmd("""\
+        dos.py destroy ${ENV_NAME}
+    """)
+  }
+
+}
\ No newline at end of file
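
The run_cmd() helper in this pipeline wraps every snippet in a virtualenv activation plus 'set -ex', so any failing line aborts the stage. For running the same snippets outside Jenkins, a rough Python equivalent of the pattern (the venv path is the one hard-coded above):

    import subprocess

    VENV_PATH = "/home/jenkins/fuel-devops30"  # same path the pipeline assumes

    def run_cmd(cmd, return_stdout=False):
        # Activate the fuel-devops virtualenv, then fail fast on any error.
        script = ". {0}/bin/activate; set -ex; {1}".format(VENV_PATH, cmd)
        if return_stdout:
            return subprocess.check_output(["bash", "-c", script],
                                           universal_newlines=True)
        subprocess.check_call(["bash", "-c", script])

    run_cmd("dos.py list")  # example usage; assumes fuel-devops is installed
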
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
new file mode 100644
index 0000000..44c773f
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -0,0 +1,136 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    kvm01:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm02:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    kvm03:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid01:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid02:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cid03:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01:
+      reclass_storage_name: kubernetes_control_node01
+      roles:
+      - kubernetes_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: kubernetes_control_node02
+      roles:
+      - kubernetes_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: kubernetes_control_node03
+      roles:
+      - kubernetes_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    cmp01:
+      reclass_storage_name: kubernetes_compute_node01
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+          single_address: ${_param:kubernetes_compute_node01_address}
+
+    cmp02:
+      reclass_storage_name: kubernetes_compute_node02
+      roles:
+      - kubernetes_compute
+      - linux_system_codename_xenial
+      - salt_master_host
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+          single_address: ${_param:kubernetes_compute_node02_address}
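
The ${_param:...} strings in this environment context are reclass parameter references, resolved against values that ultimately come from the cookiecutter context (kubernetes_compute_node01_address and friends above). A toy resolver, just to illustrate the substitution reclass performs:

    import re

    params = {"kubernetes_compute_node01_address": "10.167.4.101"}  # from context

    def resolve(value, params):
        # Replace every ${_param:key} reference with the parameter's value.
        return re.sub(r"\$\{_param:([^}]+)\}",
                      lambda m: params[m.group(1)], value)

    print(resolve("${_param:kubernetes_compute_node01_address}", params))
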
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
new file mode 100644
index 0000000..7451056
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
@@ -0,0 +1,16 @@
+{% from 'cookied-cicd-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: 'Workaround: configure ntp and rsyslog on the salt master node'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls ntp,rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
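
The workaround step targets minions with the compound matcher -C 'I@salt:master', i.e. every minion whose pillar contains the salt:master key (normally only cfg01 itself). The same call can be issued from Python via Salt's LocalClient; a sketch assuming a Salt release new enough to use tgt_type instead of the old expr_form argument:

    import salt.client  # requires running on the salt master

    local = salt.client.LocalClient()
    # Apply the ntp and rsyslog states on the pillar-matched minions.
    result = local.cmd("I@salt:master", "state.sls",
                       arg=["ntp,rsyslog"], tgt_type="compound")
    print(result)
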
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..77c18d1
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while the node is being prepared
+   - cloud-init-per once sudo touch /is_cloud_init_started
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - echo "******** MOUNT CONFIG DRIVE"
+   # Mount config drive
+   - mkdir /root/config-drive
+   - mount /dev/sr0 /root/config-drive
+
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   #- sudo ifdown ens3
+   #- sudo ip r d default || true  # remove existing default route to get it from dhcp
+   #- sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   # Run user data script from config drive
+   - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+   - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+   - rm -f /etc/network/interfaces
+   #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+   #- cp /root/config-drive/user-data /root/user-data
+   #- sed -i '/^reboot$/d' /root/user-data
+   #- set -x; cd /root && /bin/bash -xe ./user-data
+   - set -x; cd /root/config-drive && /bin/bash -xe ./user-data
+
+   #- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+   # Enable root access (after reboot)
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   #- service sshd stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - touch /is_cloud_init_finished
+   #- reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   #- path: /etc/network/interfaces
+   - path: /root/interfaces
+     content: |
+          auto lo
+          iface lo inet loopback
+
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 60
+            ServerAliveCountMax 0
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
new file mode 100644
index 0000000..b1b6430
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   #- fallocate -l 16G /swapfile
+   #- chmod 600 /swapfile
+   #- mkswap /swapfile
+   #- swapon /swapfile
+   #- echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
new file mode 100644
index 0000000..921be9b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -0,0 +1,546 @@
+# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+
+# VIP hostnames referenced in the address pools below
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+
+{% import 'cookied-cicd-k8s-calico/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-k8s-calico/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+            default_{{ HOSTNAME_CTL }}: +10
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+            default_{{ HOSTNAME_LOG }}: +60
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MON }}: +70
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_MTR }}: +85
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+            default_{{ HOSTNAME_PRX01 }}: +222
+            default_{{ HOSTNAME_KVM }}: +240
+            default_{{ HOSTNAME_KVM01 }}: +241
+            default_{{ HOSTNAME_KVM02 }}: +242
+            default_{{ HOSTNAME_KVM03 }}: +243
+
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+
+            default_{{ HOSTNAME_CID }}: +90
+            default_{{ HOSTNAME_CID01 }}: +91
+            default_{{ HOSTNAME_CID02 }}: +92
+            default_{{ HOSTNAME_CID03 }}: +93
+
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
+            storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
+            use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for VCP nodes initially based on kvm nodes.
+           # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2  (preferred)
+           # or
+           # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+                                                            # it will be uploaded after config drive generation
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+              memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+              memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: k8s_controller
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
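
The address pools in this underlay use relative offsets: a net of the form '10.70.0.0/16:24' allocates a /24 subnet from the 10.70.0.0/16 pool, and values such as +1 or [-10] are presumably counted from the start or the end of that subnet. A worked example for the first /24, sketched with the standard ipaddress module:

    import ipaddress

    subnet = ipaddress.ip_network('10.70.0.0/24')  # first /24 from the /16 pool
    gateway = subnet.network_address + 1           # '+1'  -> 10.70.0.1
    cfg01 = subnet.network_address + 15            # '+15' -> 10.70.0.15
    dhcp_start = subnet.network_address + 90       # '+90' -> 10.70.0.90
    dhcp_end = subnet.broadcast_address - 10       # '-10' -> 10.70.0.245
    print(gateway, cfg01, dhcp_start, dhcp_end)
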
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index 1f2017c..a52bf04 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -29,13 +29,6 @@
   retry: {count: 1, delay: 1}
   skip_fail: false
 
-- description: Temporary WR for cinder backend defined by default in reclass.system
-  cmd: |
-    sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: "Workaround for rack01 compute generator"
   cmd: |
     set -e;
@@ -44,10 +37,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     # Add openstack_compute_node definition from system
     reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
-    # Set ipaddresses for our nodes
-    reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address 10.167.12.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address 10.167.12.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
new file mode 100644
index 0000000..4642e7c
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -0,0 +1,38 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico-sl' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-sl.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    # Workaround for compute nodes: auto-registration cannot be used without an external address inventory, so set the control addresses explicitly
+    reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
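
The reclass-tools add-key calls above create a nested key in the target YAML file and set its value. Roughly equivalent behaviour, sketched in Python (an illustration, not the actual reclass-tools implementation):

    import yaml

    def add_key(path, dotted_key, value):
        # Create the dotted key path in the YAML file and set the value.
        with open(path) as f:
            data = yaml.safe_load(f) or {}
        node = data
        keys = dotted_key.split('.')
        for key in keys[:-1]:
            node = node.setdefault(key, {})
        node[keys[-1]] = value
        with open(path, 'w') as f:
            yaml.safe_dump(data, f, default_flow_style=False)

    # e.g. add_key('init.yml',
    #              'parameters._param.kubernetes_compute_node01_address',
    #              '10.167.4.101')
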
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
new file mode 100644
index 0000000..ceace31
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -0,0 +1,38 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    # Workaround for compute nodes: auto-registration cannot be used without an external address inventory, so set the control addresses explicitly
+    reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+    reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/shared-backup-restore.yaml b/tcp_tests/templates/shared-backup-restore.yaml
index c85e684..2ec4d5d 100644
--- a/tcp_tests/templates/shared-backup-restore.yaml
+++ b/tcp_tests/templates/shared-backup-restore.yaml
@@ -57,7 +57,7 @@
 - description: Refresh grains for the xtrabackup client node
   cmd: salt -C 'I@xtrabackup:client' saltutil.sync_grains
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
+  retry: {count: 5, delay: 10}
   skip_fail: false
 
 - description: Update the mine for the xtrabackup client node
@@ -65,7 +65,7 @@
     salt -C 'I@xtrabackup:client' mine.flush
     salt -C 'I@xtrabackup:client' mine.update
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
+  retry: {count: 3, delay: 5}
   skip_fail: false
 
 - description: Apply the xtrabackup client state
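
The retry bumps above rely on the step runner's retry semantics: the command is re-run up to count times with delay seconds between attempts, and only then does the step fail (fatally, unless skip_fail is true). A minimal sketch of that loop, with a hypothetical command callable:

    import time

    def run_step(cmd, count, delay, skip_fail=False):
        # Hypothetical runner mirroring the retry/skip_fail keys used in
        # these templates; cmd() is assumed to return True on success.
        for attempt in range(count):
            if cmd():
                return True
            time.sleep(delay)
        if not skip_fail:
            raise RuntimeError('step failed after {} attempts'.format(count))
        return False
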
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index adfe43f..061308f 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -379,15 +379,16 @@
     # 192.168.10 -> 10.16.0 (generated network for admin)
     # 10.16.0 -> <external network>
     # So let's replace constant networks to the keywords, and then keywords to the desired networks.
-    sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+    export REPLACE_DIRS="{{ CLUSTER_CONTEXT_PATH }} /tmp/cookiecutter-templates"
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
 
-    sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
 
     {% set items = CLUSTER_PRODUCT_MODELS or '$(ls /tmp/cookiecutter-templates/cluster_product/)' %}
     for item in {{ items }}; do
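
The change above applies the same two-pass rewrite to every file under /tmp/cookiecutter-templates instead of only the cluster context file. Two passes (constants to keywords, then keywords to target networks) are needed because a target prefix may itself be one of the source prefixes; a single pass could rewrite an already-substituted address. Sketched on a string, with made-up target prefixes:

    # Made-up target prefixes for the illustration; note the control target
    # ('10.167.5') is also an admin source, which is why keywords are needed.
    PASS1 = {'10.167.5': '==IPV4_NET_ADMIN_PREFIX==',
             '10.167.4': '==IPV4_NET_CONTROL_PREFIX=='}
    PASS2 = {'==IPV4_NET_ADMIN_PREFIX==': '10.70.0',
             '==IPV4_NET_CONTROL_PREFIX==': '10.167.5'}

    def rewrite(text):
        for old, new in PASS1.items():
            text = text.replace(old, new)
        for old, new in PASS2.items():
            text = text.replace(old, new)
        return text

    print(rewrite('admin: 10.167.5.15  control: 10.167.4.15'))
    # -> admin: 10.70.0.15  control: 10.167.5.15  (no double substitution)
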
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 30b046a..e2573e8 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -168,4 +168,3 @@
   skip_fail: false
 
 {{ BACKUP.MACRO_BACKUP_CEPH() }}
-{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index 8a8e6c2..ce846bb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -359,5 +359,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 5}
   skip_fail: false
-
-{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index 2d13116..21b9d28 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -344,5 +344,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 5}
   skip_fail: false
-
-{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
index d96c951..cbde3f0 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
@@ -1,7 +1,5 @@
 {% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
 # Install support services
 - description: Install keepalived on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -117,6 +115,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index cd2ab78..487d369 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -9,6 +9,7 @@
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
 {% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
 
 # Install OpenStack control services
 
@@ -338,4 +339,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index 2c5fddf..976e4bb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -166,5 +166,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 5}
   skip_fail: false
-
-{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
index b7cc62b..abaa50d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
@@ -1,7 +1,5 @@
 {% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
 # Install support services
 - description: Install keepalived on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -117,6 +115,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 187ea59..c6e1c04 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -8,6 +8,7 @@
 {% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
 {% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
 {% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
 # Install OpenStack control services
 
 - description: Install glance on all controllers
@@ -357,3 +358,5 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/common-services.yaml
index d4af905..8ba7026 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/common-services.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/common-services.yaml
@@ -1,6 +1,5 @@
 {% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
 
 - description: remove apparmor
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -124,6 +123,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
index 49440db..0bf2b70 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/openstack.yaml
@@ -9,6 +9,8 @@
 {% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker/ ' + REPOSITORY_SUITE + ' stable') %}
 # Install OpenStack control services
 
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
 - description: Install glance on all controllers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
      -C 'I@glance:server' state.sls glance -b 1
@@ -363,3 +365,6 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
+
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
index 898e000..1cd490d 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/run_test.sh
@@ -17,7 +17,7 @@
 export RUN_TEMPEST=true
 
 # Offline deploy parameters
-export SALT_MODELS_REF_CHANGE=refs/changes/44/15144/1
+#export SALT_MODELS_REF_CHANGE=refs/changes/44/15144/1
 
 export BOOTSTRAP_TIMEOUT=1200
 
@@ -34,9 +34,9 @@
 export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
 export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2016.3 main"
 export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
 
 cd tcp_tests
 py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
index 976ce45..e2df1ae 100755
--- a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
@@ -34,9 +34,9 @@
 export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
 export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2016.3 main"
 export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
 
 cd tcp_tests
 py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/templates/virtual-offline-ssl/common-services.yaml b/tcp_tests/templates/virtual-offline-ssl/common-services.yaml
index 1eb8540..0c75bb4 100644
--- a/tcp_tests/templates/virtual-offline-ssl/common-services.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/common-services.yaml
@@ -1,7 +1,5 @@
 {% from 'virtual-offline-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
-{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-
 - description: remove apparmor
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
@@ -124,6 +122,3 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
-
-{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
-{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
index 7024b0f..d3586a1 100644
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
@@ -13,6 +13,7 @@
 {% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker/ ' + REPOSITORY_SUITE + ' stable') %}
 
 {% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
 
 # Install OpenStack control services
 
@@ -524,4 +525,7 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
+
 {{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-ssl/run_test.sh b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
index 9c9a381..f24f56e 100755
--- a/tcp_tests/templates/virtual-offline-ssl/run_test.sh
+++ b/tcp_tests/templates/virtual-offline-ssl/run_test.sh
@@ -34,9 +34,9 @@
 export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
 export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2016.3 main"
 export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
-export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
-export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
-export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
+export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-updates main universe restricted"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/${REPOSITORY_SUITE}/ubuntu xenial-security main universe restricted"
 
 cd tcp_tests
 py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
diff --git a/tcp_tests/tests/environment/test_local_dns.py b/tcp_tests/tests/environment/test_local_dns.py
index fcc0978..dfd29a1 100644
--- a/tcp_tests/tests/environment/test_local_dns.py
+++ b/tcp_tests/tests/environment/test_local_dns.py
@@ -25,3 +25,7 @@
     @pytest.mark.fail_snapshot
     def test_install_local_dns(self, config, underlay):
         LOG.info("*************** DONE **************")
+
+    @pytest.mark.fail_snapshot
+    def test_install_local_salt(self, config, underlay, salt_deployed):
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 1214cd3..61dd8e2 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -27,6 +27,7 @@
 from tcp_tests.fixtures.k8s_fixtures import *  # noqa
 from tcp_tests.fixtures.drivetrain_fixtures import *  # noqa
 from tcp_tests.fixtures.day1_fixtures import *  # noqa
+from tcp_tests.fixtures.runtest_fixtures import *  # noqa
 
 
 __all__ = sorted([  # sort for documentation
@@ -74,5 +75,7 @@
     'k8s_deployed',
     'day1_underlay',
     'day1_cfg_config',
-    'day1_salt_action'
+    'day1_salt_action',
+    # tempest
+    'tempest_actions'
 ])
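
With runtest_fixtures exported through conftest.py, any test module collected under tests/system can request the fixture by argument name; pytest injects it automatically. A minimal sketch:

    # Sketch: pytest matches the argument name against registered fixtures.
    def test_example(tempest_actions):
        tempest_actions.prepare_and_run_tempest()
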
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
new file mode 100644
index 0000000..d63433b
--- /dev/null
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -0,0 +1,72 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests import logger
+from tcp_tests.managers import backup_restore_manager
+
+LOG = logger.logger
+
+
+class TestBackupRestoreMaster(object):
+    """Test class for testing backup restore of master node"""
+
+    def test_backup_cfg_backupninja_rsync(
+            self, underlay, config, openstack_deployed,
+            salt_actions, show_step):
+        """Test add policy for Nova service
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Check config for rsync exists
+            5. Run backup command
+            6. Delete salt master pki
+            7. Run restore
+            8. Check pki was restored
+            9. Check that minions respond to the master
+
+        """
+        backup = backup_restore_manager.BackupRestoreManager(
+            config=config, underlay=underlay, salt_api=salt_actions)
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        backup.check_file_exists('cfg01')
+
+        # STEP #5
+        show_step(5)
+        backup.create_backup('cfg01')
+
+        # STEP #6
+        show_step(6)
+        backup.delete_dirs_files('cfg01')
+
+        # STEP #7
+        show_step(7)
+        backup.restore_salt('cfg01')
+
+        # STEP #8
+        show_step(8)
+        backup.verify_salt_master_restored('cfg01')
+
+        # STEP #9
+        show_step(9)
+        backup.ping_minions('cfg01')
+
+        LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index 0dc7265..484aec5 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -52,6 +52,7 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.calico_ci
     @pytest.mark.cz8116
+    @pytest.mark.k8s_calico
     def test_calico_route_recovery(self, show_step, config, underlay,
                                    k8s_deployed):
         """Test for deploying k8s environment with Calico plugin and check
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index e0e88cf..ac4d1d1 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -27,6 +27,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     @pytest.mark.cz8116
+    @pytest.mark.k8s_calico_sl
     def test_k8s_install_calico_lma(self, config, show_step,
                                     k8s_deployed, k8s_actions,
                                     sl_deployed, sl_actions):
@@ -193,6 +194,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     @pytest.mark.cz8116
+    @pytest.mark.k8s_calico
     def test_only_k8s_install(self, config, show_step,
                               k8s_deployed, k8s_actions, k8s_logs):
         """Test for deploying MCP environment with k8s and check it
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index c5138f0..7c7c680 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -14,8 +14,6 @@
 
 import pytest
 
-from tcp_tests.managers.runtestmanager import RuntestManager
-
 from tcp_tests import logger
 from tcp_tests import settings
 
@@ -32,8 +30,7 @@
     def test_mcp_pike_ovs_install(self, underlay,
                                   openstack_deployed,
                                   openstack_actions,
-                                  salt_actions,
-                                  config):
+                                  tempest_actions):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
@@ -45,27 +42,8 @@
         openstack_actions._salt.local(
             tgt='*', fun='cmd.run',
             args='service ntp stop; ntpd -gq; service ntp start')
-
         if settings.RUN_TEMPEST:
-            tempest_threads = 2
-            tempest_exclude_test_args = ''
-            tempest_pattern = settings.TEMPEST_PATTERN
-            cluster_name = settings.LAB_CONFIG_NAME
-            tempest_timeout = settings.TEMPEST_TIMEOUT
-            domain_name = "{}.local".format(cluster_name)
-            target = 'gtw01'
-            runtest = RuntestManager(
-                underlay, salt_actions,
-                cluster_name=cluster_name,
-                domain_name=domain_name,
-                tempest_threads=tempest_threads,
-                tempest_exclude_test_args=tempest_exclude_test_args,
-                tempest_pattern=tempest_pattern,
-                target=target)
-            runtest.prepare()
-            test_res = runtest.run_tempest(tempest_timeout)
-            runtest.fetch_arficats(username='root')
-            runtest.save_runtime_logs(**test_res)
+            tempest_actions.prepare_and_run_tempest()
 
         LOG.info("*************** DONE **************")
 
@@ -110,21 +88,21 @@
     def test_mcp_pike_dvr_install(self,
                                   underlay,
                                   openstack_deployed,
-                                  openstack_actions):
+                                  openstack_actions,
+                                  tempest_actions):
         """Test for deploying an mcp environment and check it
         Scenario:
         1. Prepare salt on hosts
         2. Setup controller nodes
         3. Setup compute nodes
-
+        4. Run tempest
         """
         openstack_actions._salt.local(
             tgt='*', fun='cmd.run',
             args='service ntp stop; ntpd -gq; service ntp start')
 
         if settings.RUN_TEMPEST:
-            openstack_actions.run_tempest(pattern=settings.PATTERN)
-            openstack_actions.download_tempest_report()
+            tempest_actions.prepare_and_run_tempest()
         LOG.info("*************** DONE **************")
 
     @pytest.mark.grab_versions
@@ -284,16 +262,12 @@
         LOG.info("*************** DONE **************")
 
     @pytest.mark.fail_snapshot
-    def test_bm_deploy(self, config, openstack_deployed,
-                       openstack_actions):
+    def test_bm_deploy(self, config, underlay,
+                       openstack_deployed):
         """Test for deploying an mcp environment on baremetal
 
         """
-        openstack_actions._salt.local(
+        openstack_deployed._salt.local(
             tgt='*', fun='cmd.run',
             args='service ntp stop; ntpd -gq; service ntp start')
-
-        if settings.RUN_TEMPEST:
-            openstack_actions.run_tempest(pattern=settings.PATTERN)
-            openstack_actions.download_tempest_report()
         LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index a8dbe72..2ebc010 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 import pytest
+import time
 
 from tcp_tests import logger
 from tcp_tests import settings
@@ -26,6 +27,7 @@
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
     @pytest.mark.cz8116
+    @pytest.mark.k8s_calico
     def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
         """Test externaldns integration with coredns
 
@@ -106,6 +108,9 @@
         sample_service_ip = k8s_deployed.get_svc_ip(deployment_name, 'default')
         k8s_deployed.wait_deploy_ready(deployment_name)
 
+        # workaround for PROD-20720
+        time.sleep(30)
+
         def check_is_test_service_available():
             assert "Hello Kubernetes!" in k8s_deployed.curl(
                 "http://{}:{}".format(sample_service_ip, 8080))
diff --git a/tcp_tests/tests/system/test_ovs_pike_ceph.py b/tcp_tests/tests/system/test_ovs_pike_ceph.py
index 080be26..1f62a94 100644
--- a/tcp_tests/tests/system/test_ovs_pike_ceph.py
+++ b/tcp_tests/tests/system/test_ovs_pike_ceph.py
@@ -26,9 +26,11 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    def test_pike_ceph_all_ovs_install(self, underlay, openstack_deployed,
+    def test_pike_ceph_all_ovs_install(self, underlay,
+                                       openstack_deployed,
                                        ceph_deployed,
-                                       openstack_actions):
+                                       openstack_actions,
+                                       tempest_actions):
         """Test for deploying pike ovs with ceph and check it
         Scenario:
         1. Prepare salt on hosts
@@ -43,7 +45,5 @@
                 args='service ntp stop; ntpd -gq; service ntp start')
 
         if settings.RUN_TEMPEST:
-            openstack_actions.run_tempest(pattern=settings.PATTERN,
-                                          conf_name='ceph_mcp.conf')
-            openstack_actions.download_tempest_report()
+            tempest_actions.prepare_and_run_tempest()
         LOG.info("*************** DONE **************")