Merge "Add manila support"
diff --git a/tcp_tests/fixtures/drivetrain_fixtures.py b/tcp_tests/fixtures/drivetrain_fixtures.py
new file mode 100644
index 0000000..e0e709b
--- /dev/null
+++ b/tcp_tests/fixtures/drivetrain_fixtures.py
@@ -0,0 +1,81 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+from tcp_tests.managers import drivetrain_manager
+
+LOG = logger.logger
+
+
+@pytest.fixture(scope='function')
+def drivetrain_actions(config, underlay, salt_actions):
+    """Fixture that provides various actions for Drivetrain
+
+    :param config: fixture provides oslo.config
+    :param underlay: fixture provides underlay manager
+    :param salt_actions: fixture provides SaltManager instance
+    :rtype: DrivetrainManager
+    """
+    return drivetrain_manager.DrivetrainManager(config, underlay, salt_actions)
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.drivetrain_deployed)
+@pytest.fixture(scope='function')
+def drivetrain_deployed(revert_snapshot, request, config,
+                        hardware, underlay, salt_deployed,
+                        drivetrain_actions):
+    """Fixture to get or install Drivetrain on the environment
+
+    :param revert_snapshot: fixture that reverts snapshot that is specified
+                            in test with @pytest.mark.revert_snapshot(<name>)
+    :param request: fixture provides pytest data
+    :param config: fixture provides oslo.config
+    :param hardware: fixture provides environment manager
+    :param underlay: fixture provides underlay manager
+    :param salt_deployed: fixture provides SaltManager instance
+    :param drivetrain_actions: fixture provides DrivetrainManager instance
+    :rtype: DrivetrainManager
+
+    If config.drivetrain.drivetrain_installed is not set, this
+    fixture assumes that Drivetrain was not installed,
+    and does the following:
+    - install Drivetrain
+    - make snapshot with name 'drivetrain_deployed'
+    - return DrivetrainManager
+
+    If config.drivetrain.drivetrain_installed was set, this fixture
+    assumes that Drivetrain was already installed, and does
+    the following:
+    - return DrivetrainManager instance
+
+    If you want to revert 'drivetrain_deployed' snapshot, please use mark:
+    @pytest.mark.revert_snapshot("drivetrain_deployed")
+    """
+    if not config.drivetrain.drivetrain_installed:
+        steps_path = config.drivetrain_deploy.drivetrain_steps_path
+        commands = underlay.read_template(steps_path)
+        drivetrain_actions.install(commands)
+        hardware.create_snapshot(ext.SNAPSHOT.drivetrain_deployed)
+        salt_deployed.sync_time()
+
+    else:
+        # 1. hardware environment created and powered on
+        # 2. config.underlay.ssh contains SSH access to provisioned nodes
+        #    (can be passed from external config with TESTS_CONFIGS variable)
+        # 3. config.tcp.* options contain access credentials to the already
+        #    installed TCP API endpoint
+        pass
+
+    return drivetrain_actions
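+
+# A minimal usage sketch (hypothetical test, not part of this change):
+# request the fixture by name and control the snapshot via the mark.
+#
+#     @pytest.mark.revert_snapshot("drivetrain_deployed")
+#     def test_drivetrain_example(underlay, drivetrain_deployed):
+#         # drivetrain_deployed is the DrivetrainManager instance
+#         assert drivetrain_deployed is not None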
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index 6cc2721..c6fa204 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -94,7 +94,30 @@
                 (request.node.rep_call.passed or request.node.rep_call.failed)\
                 and grab_virtlet_result:
             files = utils.extract_name_from_mark(grab_virtlet_result) \
-                            or "{}".format(func_name)
+                    or "{}".format(func_name)
             k8s_deployed.extract_file_to_node()
-            k8s_deployed.download_virtlet_conformance_log(files)
+            k8s_deployed.download_k8s_logs(files)
+
+    request.addfinalizer(test_fin)
+
+
+@pytest.fixture(scope='function')
+def cncf_log_helper(request, func_name, underlay, k8s_deployed):
+    """Finalizer to prepare cncf tar.gz and save results from archive"""
+
+    cncf_publisher = request.keywords.get('cncf_publisher', None)
+
+    def test_fin():
+        if hasattr(request.node, 'rep_call') and \
+                (request.node.rep_call.passed or request.node.rep_call.failed)\
+                and cncf_publisher:
+            files = utils.extract_name_from_mark(cncf_publisher) \
+                    or "{}".format(func_name)
+            k8s_deployed.extract_file_to_node(
+                system='k8s', file_path='tmp/sonobuoy',
+                pod_name='sonobuoy', pod_namespace='sonobuoy'
+            )
+            k8s_deployed.manage_cncf_archive()
+            k8s_deployed.download_k8s_logs(files)
+
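+    # Usage sketch (hypothetical): a test opts in by carrying the
+    # @pytest.mark.cncf_publisher mark and requesting this fixture; the
+    # finalizer then collects the results archive after the test finishes.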
     request.addfinalizer(test_fin)
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 804d110..7999201 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -45,6 +45,7 @@
     'salt_deployed',
     'common_services_deployed',
     'oss_deployed',
+    'drivetrain_deployed',
     'openstack_deployed',
     'sl_deployed',
     'virtlet_deployed',
diff --git a/tcp_tests/managers/drivetrain_manager.py b/tcp_tests/managers/drivetrain_manager.py
new file mode 100644
index 0000000..6e2fbda
--- /dev/null
+++ b/tcp_tests/managers/drivetrain_manager.py
@@ -0,0 +1,34 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+
+
+class DrivetrainManager(ExecuteCommandsMixin):
+    """docstring for DrivetrainManager"""
+
+    __config = None
+    __underlay = None
+
+    def __init__(self, config, underlay, salt=None):
+        self.__config = config
+        self.__underlay = underlay
+        self._salt = salt
+        super(DrivetrainManager, self).__init__(
+            config=config, underlay=underlay)
+
+    def install(self, commands):
+        self.execute_commands(commands,
+                              label='Install Drivetrain Tools')
+        self.__config.drivetrain.drivetrain_installed = True
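+
+# A minimal usage sketch (mirrors the 'drivetrain_deployed' fixture):
+#
+#     manager = DrivetrainManager(config, underlay, salt=salt_actions)
+#     steps = underlay.read_template(
+#         config.drivetrain_deploy.drivetrain_steps_path)
+#     manager.install(steps)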
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 14ce79f..e37ccaa 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -531,27 +531,58 @@
             LOG.info("Test results stderr: {}".format(stderr))
         return result
 
-    def extract_file_to_node(self, container='virtlet',
-                             file_path='report.xml'):
-        """
-        Download file from docker container to node
+    def start_k8s_cncf_verification(self, timeout=60 * 90):
+        """Deploy the sonobuoy-based CNCF conformance suite and wait
+        until its log reports that the run is complete
+
+        :param timeout: seconds to wait for the conformance run
+        """
+        cncf_cmd = ("curl -L https://raw.githubusercontent.com/cncf/"
+                    "k8s-conformance/master/sonobuoy-conformance.yaml"
+                    " | kubectl apply -f -")
+        with self.__underlay.remote(
+                node_name=self.ctl_host) as remote:
+            remote.check_call(cncf_cmd, timeout=60)
+            self.wait_pod_phase('sonobuoy', 'Running',
+                                namespace='sonobuoy', timeout=120)
+            wait_cmd = ('kubectl logs -n sonobuoy sonobuoy | '
+                        'grep "sonobuoy is now blocking"')
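+            # grep exits with 1 while the log line is absent, so both 0 and
+            # 1 are expected exit codes; helpers.wait polls until it hits 0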
 
+            expected = [0, 1]
+            helpers.wait(
+                lambda: remote.check_call(
+                    wait_cmd, expected=expected).exit_code == 0,
+                interval=30, timeout=timeout,
+                timeout_msg="Timeout for CNCF reached."
+            )
+
+    def extract_file_to_node(self, system='docker',
+                             container='virtlet',
+                             file_path='report.xml', **kwargs):
+        """
+        Download file from docker or k8s container to node
+
+        :param system: docker or k8s
         :param container: Full name or part of the container name
         :param file_path: File path in container
+        :param kwargs: 'pod_name' and 'pod_namespace' for the k8s case
         :return:
         """
         with self.__underlay.remote(
                 node_name=self.ctl_host) as remote:
-            cmd = ("docker ps --all | grep {0} |"
-                   " awk '{{print $1}}'".format(container))
-            result = remote.check_call(cmd)
-            container_id = result['stdout'][0].strip()
-            cmd = "docker start {}".format(container_id)
-            remote.check_call(cmd)
-            cmd = "docker cp {0}:/{1} .".format(container_id, file_path)
-            remote.check_call(cmd)
+            if system == 'docker':
+                cmd = ("docker ps --all | grep {0} |"
+                       " awk '{{print $1}}'".format(container))
+                result = remote.check_call(cmd)
+                container_id = result['stdout'][0].strip()
+                cmd = "docker start {}".format(container_id)
+                remote.check_call(cmd)
+                cmd = "docker cp {0}:/{1} .".format(container_id, file_path)
+                remote.check_call(cmd)
+            else:
+                # system is k8s
+                pod_name = kwargs.get('pod_name')
+                pod_namespace = kwargs.get('pod_namespace')
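+                # 'kubectl cp <namespace>/<pod>:/<path> .' copies the file
+                # from the pod to the current directory on the node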
+                cmd = 'kubectl cp {0}/{1}:/{2} .'.format(
+                    pod_namespace, pod_name, file_path)
+                remote.check_call(cmd)
 
-    def download_virtlet_conformance_log(self, files):
+    def download_k8s_logs(self, files):
         """
         Download JUnit report and conformance logs from cluster
         :param files:
@@ -565,3 +596,35 @@
                 r.check_call(cmd, raise_on_err=False)
                 LOG.info("Downloading the artifact {0}".format(log_file))
                 r.download(destination=log_file, target=os.getcwd())
+
+    def manage_cncf_archive(self):
+        """
+        Untar the CNCF results archive, move the files we need to the
+        home folder, prepare them for downloading and clean up the rest.
+        Generates the files: e2e.log, junit_01.xml, cncf_results.tar.gz
+        and version.txt
+        :return:
+        """
+
+        # The namespace and pod name can be hardcoded because this function
+        # is specific to CNCF, and CNCF is not going to change the launch
+        # pod name or namespace.
+        get_tar_name_cmd = ("kubectl logs -n sonobuoy sonobuoy | "
+                            "grep 'Results available' | "
+                            "sed 's/.*\///' | tr -d '\"'")
+
+        with self.__underlay.remote(
+                node_name=self.ctl_host) as remote:
+            tar_name = remote.check_call(get_tar_name_cmd)['stdout'][0].strip()
+            untar = "mkdir result && tar -C result -xzf {0}".format(tar_name)
+            remote.check_call(untar)
+            manage_results = ("mv result/plugins/e2e/results/e2e.log . && "
+                              "mv result/plugins/e2e/results/junit_01.xml . ;"
+                              "kubectl version > version.txt")
+            remote.check_call(manage_results, raise_on_err=False)
+            cleanup_host = "rm -rf result"
+            remote.check_call(cleanup_host)
+            # The rename is needed by the download fixture, since there is
+            # no way to apply the fixture argument dynamically from a test.
+            rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
+            remote.check_call(rename_tar)
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index bbf9315..275ef57 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -105,6 +105,10 @@
         with self.__underlay.remote(node_name=target_name[0]) as node_remote:
             result = node_remote.execute(cmd, verbose=True)
 
+        cmd_iptables = "iptables --policy FORWARD ACCEPT"
+        with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+            result = node_remote.execute(cmd_iptables, verbose=True)
+
         with self.__underlay.remote(
                 host=self.__config.salt.salt_master_host) as node_remote:
             result = node_remote.execute(
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index ae72941..d5b8782 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -96,6 +96,9 @@
                                                        version=version))
         self._underlay.check_call(cmd, node_name=self._node_name)
 
+        cmd_iptables = "iptables --policy FORWARD ACCEPT"
+        self._underlay.check_call(cmd_iptables, node_name=self._node_name)
+
         LOG.info("Create rally workdir")
         cmd = 'mkdir -p /root/rally; chown 65500 /root/rally'
         self._underlay.check_call(cmd, node_name=self._node_name)
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 54984ae..fce80cc 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -414,64 +414,68 @@
 
     def get_logs(self, artifact_name,
                  node_role=ext.UNDERLAY_NODE_ROLES.salt_master):
+
+        # Prefix each '$' symbol with a backslash '\' to disable
+        # early interpolation of environment variables on the cfg01 node only
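+        # (e.g. "\$(hostname -f)" expands on each target node, while an
+        # unescaped "$(hostname -f)" would already expand on cfg01)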
         dump_commands = (
-            "mkdir /root/$(hostname -f)/;"
-            "rsync -aruv /var/log/ /root/$(hostname -f)/;"
-            "dpkg -l > /root/$(hostname -f)/dump_dpkg_l.txt;"
-            "df -h > /root/$(hostname -f)/dump_df.txt;"
-            "mount > /root/$(hostname -f)/dump_mount.txt;"
-            "blkid -o list > /root/$(hostname -f)/dump_blkid_o_list.txt;"
-            "iptables -t nat -S > /root/$(hostname -f)/dump_iptables_nat.txt;"
-            "iptables -S > /root/$(hostname -f)/dump_iptables.txt;"
-            "ps auxwwf > /root/$(hostname -f)/dump_ps.txt;"
-            "docker images > /root/$(hostname -f)/dump_docker_images.txt;"
-            "docker ps > /root/$(hostname -f)/dump_docker_ps.txt;"
+            "mkdir /root/\$(hostname -f)/;"
+            "rsync -aruv /var/log/ /root/\$(hostname -f)/;"
+            "dpkg -l > /root/\$(hostname -f)/dump_dpkg_l.txt;"
+            "df -h > /root/\$(hostname -f)/dump_df.txt;"
+            "mount > /root/\$(hostname -f)/dump_mount.txt;"
+            "blkid -o list > /root/\$(hostname -f)/dump_blkid_o_list.txt;"
+            "iptables -t nat -S > /root/\$(hostname -f)/dump_iptables_nat.txt;"
+            "iptables -S > /root/\$(hostname -f)/dump_iptables.txt;"
+            "ps auxwwf > /root/\$(hostname -f)/dump_ps.txt;"
+            "docker images > /root/\$(hostname -f)/dump_docker_images.txt;"
+            "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
             "docker service ls > "
-            "  /root/$(hostname -f)/dump_docker_services_ls.txt;"
-            "for SERVICE in $(docker service ls | awk '{ print $2 }'); "
-            "  do docker service ps --no-trunc 2>&1 $SERVICE >> "
-            "    /root/$(hostname -f)/dump_docker_service_ps.txt;"
+            "  /root/\$(hostname -f)/dump_docker_services_ls.txt;"
+            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
+            "  do docker service ps --no-trunc 2>&1 \$SERVICE >> "
+            "    /root/\$(hostname -f)/dump_docker_service_ps.txt;"
             "  done;"
-            "for SERVICE in $(docker service ls | awk '{ print $2 }'); "
-            "  do docker service logs 2>&1 $SERVICE > "
-            "    /root/$(hostname -f)/dump_docker_service_${SERVICE}_logs;"
+            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
+            "  do docker service logs 2>&1 \$SERVICE > "
+            "    /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
             "  done;"
-            "vgdisplay > /root/$(hostname -f)/dump_vgdisplay.txt;"
-            "lvdisplay > /root/$(hostname -f)/dump_lvdisplay.txt;"
-            "ip a > /root/$(hostname -f)/dump_ip_a.txt;"
-            "ip r > /root/$(hostname -f)/dump_ip_r.txt;"
-            "netstat -anp > /root/$(hostname -f)/dump_netstat.txt;"
-            "brctl show > /root/$(hostname -f)/dump_brctl_show.txt;"
-            "arp -an > /root/$(hostname -f)/dump_arp.txt;"
-            "uname -a > /root/$(hostname -f)/dump_uname_a.txt;"
-            "lsmod > /root/$(hostname -f)/dump_lsmod.txt;"
-            "cat /proc/interrupts > /root/$(hostname -f)/dump_interrupts.txt;"
-            "cat /etc/*-release > /root/$(hostname -f)/dump_release.txt;"
+            "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
+            "lvdisplay > /root/\$(hostname -f)/dump_lvdisplay.txt;"
+            "ip a > /root/\$(hostname -f)/dump_ip_a.txt;"
+            "ip r > /root/\$(hostname -f)/dump_ip_r.txt;"
+            "netstat -anp > /root/\$(hostname -f)/dump_netstat.txt;"
+            "brctl show > /root/\$(hostname -f)/dump_brctl_show.txt;"
+            "arp -an > /root/\$(hostname -f)/dump_arp.txt;"
+            "uname -a > /root/\$(hostname -f)/dump_uname_a.txt;"
+            "lsmod > /root/\$(hostname -f)/dump_lsmod.txt;"
+            "cat /proc/interrupts > /root/\$(hostname -f)/dump_interrupts.txt;"
+            "cat /etc/*-release > /root/\$(hostname -f)/dump_release.txt;"
             # OpenStack specific, will fail on other nodes
-            # "rabbitmqctl report > /root/$(hostname -f)/dump_rabbitmqctl.txt;"
+            # "rabbitmqctl report > "
+            # "  /root/\$(hostname -f)/dump_rabbitmqctl.txt;"
 
-            # "ceph health > /root/$(hostname -f)/dump_ceph_health.txt;"
-            # "ceph -s > /root/$(hostname -f)/dump_ceph_s.txt;"
-            # "ceph osd tree > /root/$(hostname -f)/dump_ceph_osd_tree.txt;"
+            # "ceph health > /root/\$(hostname -f)/dump_ceph_health.txt;"
+            # "ceph -s > /root/\$(hostname -f)/dump_ceph_s.txt;"
+            # "ceph osd tree > /root/\$(hostname -f)/dump_ceph_osd_tree.txt;"
 
-            # "for ns in $(ip netns list);"
-            # " do echo Namespace: ${ns}; ip netns exec ${ns} ip a;"
-            # "done > /root/$(hostname -f)/dump_ip_a_ns.txt;"
+            # "for ns in \$(ip netns list);"
+            # " do echo Namespace: \${ns}; ip netns exec \${ns} ip a;"
+            # "done > /root/\$(hostname -f)/dump_ip_a_ns.txt;"
 
-            # "for ns in $(ip netns list);"
-            # " do echo Namespace: ${ns}; ip netns exec ${ns} ip r;"
-            # "done > /root/$(hostname -f)/dump_ip_r_ns.txt;"
+            # "for ns in \$(ip netns list);"
+            # " do echo Namespace: \${ns}; ip netns exec \${ns} ip r;"
+            # "done > /root/\$(hostname -f)/dump_ip_r_ns.txt;"
 
-            # "for ns in $(ip netns list);"
-            # " do echo Namespace: ${ns}; ip netns exec ${ns} netstat -anp;"
-            # "done > /root/$(hostname -f)/dump_netstat_ns.txt;"
+            # "for ns in \$(ip netns list);"
+            # " do echo Namespace: \${ns}; ip netns exec \${ns} netstat -anp;"
+            # "done > /root/\$(hostname -f)/dump_netstat_ns.txt;"
 
             "/usr/bin/haproxy-status.sh > "
-            "  /root/$(hostname -f)/dump_haproxy.txt;"
+            "  /root/\$(hostname -f)/dump_haproxy.txt;"
 
             # Archive the files
             "cd /root/; tar --absolute-names --warning=no-file-changed "
-            "  -czf $(hostname -f).tar.gz ./$(hostname -f)/;"
+            "  -czf \$(hostname -f).tar.gz ./\$(hostname -f)/;"
         )
 
         master_host = self.__config.salt.salt_master_host
@@ -490,7 +494,8 @@
                          .format(node['node_name']))
                 master.check_call("rsync -aruv {0}:/root/*.tar.gz "
                                   "/root/dump/".format(node['node_name']),
-                                  raise_on_err=False)
+                                  raise_on_err=False,
+                                  timeout=120)
 
             destination_name = '/root/{0}_dump.tar.gz'.format(artifact_name)
             # Archive the artifacts from all nodes
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index ad4cce3..bcefb83 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -42,6 +42,10 @@
     __name__,
     'templates/{0}/oss.yaml'.format(
         settings.LAB_CONFIG_NAME))
+_default_drivetrain_steps = pkg_resources.resource_filename(
+    __name__,
+    'templates/{0}/drivetrain.yaml'.format(
+        settings.LAB_CONFIG_NAME))
 _default_decapod_steps = pkg_resources.resource_filename(
     __name__,
     'templates/{0}/decapod.yaml'.format(
@@ -129,6 +133,11 @@
     ct.Cfg('templates_dir', ct.String(),
            help="Path to directory with templates",
            default=_default_templates_dir),
+    ct.Cfg('salt_roles', ct.JSONList(),
+           help="Node roles to install salt-minions and manage by salt",
+           default=[ext.UNDERLAY_NODE_ROLES.salt_master,
+                    ext.UNDERLAY_NODE_ROLES.salt_minion,
+                    ext.UNDERLAY_NODE_ROLES.k8s_controller]),
 ]
 salt_opts = [
     ct.Cfg('salt_master_host', ct.IPAddress(),
@@ -159,6 +168,17 @@
            help="", default=False),
 ]
 
+drivetrain_deploy_opts = [
+    ct.Cfg('drivetrain_steps_path', ct.String(),
+           help="Path to YAML with steps to deploy Drivetrain",
+           default=_default_drivetrain_steps),
+]
+
+drivetrain_opts = [
+    ct.Cfg('drivetrain_installed', ct.Boolean(),
+           help="", default=False),
+]
+
 decapod_deploy_opts = [
     ct.Cfg('decapod_steps_path', ct.String(),
            help="Path to YAML with steps to deploy Ceph with Decapod",
@@ -332,6 +352,8 @@
     ('common_services', common_services_opts),
     ('oss_deploy', oss_deploy_opts),
     ('oss', oss_opts),
+    ('drivetrain_deploy', drivetrain_deploy_opts),
+    ('drivetrain', drivetrain_opts),
     ('decapod_deploy', decapod_deploy_opts),
     ('decapod', decapod_opts),
     ('openstack_deploy', openstack_deploy_opts),
@@ -383,6 +405,15 @@
     config.register_opts(group='oss_deploy',
                          opts=oss_deploy_opts)
 
+    config.register_group(cfg.OptGroup(name='drivetrain',
+                          title="Drivetrain Tools", help=""))
+    config.register_opts(group='drivetrain', opts=drivetrain_opts)
+
+    config.register_group(cfg.OptGroup(name='drivetrain_deploy',
+                          title="Drivetrain deploy config", help=""))
+    config.register_opts(group='drivetrain_deploy',
+                         opts=drivetrain_deploy_opts)
+
     config.register_group(cfg.OptGroup(name='decapod',
                           title="Decapod options for Ceph", help=""))
     config.register_opts(group='decapod', opts=decapod_opts)
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 342e4c8..f64d8ca 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -280,6 +280,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 1434a32..992ef9f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -338,6 +338,11 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
 #- description: create rc file on cfg
 #  cmd: scp ctl01:/root/keystonercv3 /root
 #  node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
index 8c214e3..43483ae 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -319,6 +319,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
index 3882976..a4b52a5 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -361,6 +361,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index 913636b..5a68626 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -134,12 +134,12 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-        ens5:
+        dpdkport0:
           role: bond2_dpdk_prv
-          dpdk_pci: "00:05.0"
-        ens6:
+          dpdk_pci: "0000:00:05.0"
+        dpdkport1:
           role: bond2_dpdk_prv
-          dpdk_pci: "00:06.0"
+          dpdk_pci: "0000:00:06.0"
         ens7:
           role: bond1_ab_ovs_floating
 
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
index d424970..f23c660 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
@@ -30,6 +30,25 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: "Workaround for PROD-18834: Pre-install linux-headers package"
+  cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Workaround for PROD-18833: Pre-install ovs packages and update alternatives to DPDK"
+  cmd: |
+    set -ex;
+    salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
+    salt 'cmp*' cmd.run "service openvswitch-switch stop"
+    salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*"
+    salt 'cmp*' cmd.run "update-alternatives --set ovs-vswitchd /usr/lib/openvswitch-switch-dpdk/ovs-vswitchd-dpdk";
+    salt 'cmp*' cmd.run "service openvswitch-switch start"
+    salt 'cmp*' cmd.run "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 - description: Hack gtw node
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index b52edb6..bac6199 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -2,8 +2,8 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-pike-dvr-ssl.local
-  cluster_name: virtual-mcp-pike-dvr-ssl
+  cluster_domain: cookied-mcp-pike-dvr-ssl.local
+  cluster_name: cookied-mcp-pike-dvr-ssl
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
index 10b54e4..6edac6e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.virtual-mcp-pike-dvr-ssl.local:
+    cfg01.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,7 +10,7 @@
         ens4:
           role: single_vlan_ctl
 
-    ctl01.virtual-mcp-pike-dvr-ssl.local:
+    ctl01.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
@@ -27,7 +27,7 @@
         ens4:
           role: single_vlan_ctl
 
-    ctl02.virtual-mcp-pike-dvr-ssl.local:
+    ctl02.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
@@ -43,7 +43,7 @@
         ens4:
           role: single_vlan_ctl
 
-    ctl03.virtual-mcp-pike-dvr-ssl.local:
+    ctl03.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
@@ -59,7 +59,7 @@
         ens4:
           role: single_vlan_ctl
 
-    prx01.virtual-mcp-pike-dvr-ssl.local:
+    prx01.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
@@ -71,7 +71,7 @@
         ens4:
           role: single_vlan_ctl
 
-    mon01.virtual-mcp-pike-dvr-ssl.local:
+    mon01.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: stacklight_server_node01
       roles:
       - stacklightv2_server_leader
@@ -84,7 +84,7 @@
         ens4:
           role: single_vlan_ctl
 
-    mon02.virtual-mcp-pike-dvr-ssl.local:
+    mon02.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: stacklight_server_node02
       roles:
       - stacklightv2_server
@@ -97,7 +97,7 @@
         ens4:
           role: single_vlan_ctl
 
-    mon03.virtual-mcp-pike-dvr-ssl.local:
+    mon03.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: stacklight_server_node03
       roles:
       - stacklightv2_server
@@ -111,7 +111,7 @@
           role: single_vlan_ctl
 
     # Generator-based computes. For compatibility only
-    cmp<<count>>.virtual-mcp-pike-dvr-ssl.local:
+    cmp<<count>>.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
@@ -126,7 +126,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.virtual-mcp-pike-dvr-ssl.local:
+    gtw01.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
@@ -141,7 +141,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    dns01.virtual-mcp-pike-dvr-ssl.local:
+    dns01.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_dns_node01
       roles:
       - features_designate_pool_manager_dns
@@ -158,7 +158,7 @@
           role: single_vlan_ctl
           single_address: ${_param:openstack_dns_node01_address}
 
-    dns02.virtual-mcp-pike-dvr-ssl.local:
+    dns02.cookied-mcp-pike-dvr-ssl.local:
       reclass_storage_name: openstack_dns_node02
       roles:
       - features_designate_pool_manager_dns
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml
new file mode 100644
index 0000000..4d79b7d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
new file mode 100644
index 0000000..02cb4c7
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
@@ -0,0 +1,407 @@
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: overrides-policy.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+  cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+  cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+    ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
+- description: Nginx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install designate
+- description: Install powerdns
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@powerdns:server' state.sls powerdns.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install designate
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@designate:server' state.sls designate -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+  # Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description:  Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+#- description:  Allow all tcp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+#
+#- description:  Allow all icmp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
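+# The echoed sequence answers fdisk's interactive prompts: create a new
+# primary partition with default number and bounds, then write the table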
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 01
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 02
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 03
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: create volume_group
+  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install cinder-volume
+  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install crudini
+  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set the enabled_backends value 01
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set the enabled_backends value 02
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set the enabled_backends value 03
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install docker.io on gtw
+  cmd: salt-call cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
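+# Installing docker.io switches the FORWARD chain policy to DROP, so the
+# next step restores ACCEPT to keep routed traffic flowing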
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Restart cinder volume
+  cmd: |
+    salt -C 'I@cinder:controller' service.restart cinder-volume;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml
@@ -0,0 +1,40 @@
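+# Note: keys left with an empty value (e.g. 'volume:extend':) are presumably
+# meant to drop that rule from the service's policy file (assumption based
+# on how the salt formulas treat empty policy values)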
+parameters:
+  nova:
+    controller:
+      policy:
+        context_is_admin: 'role:admin or role:administrator'
+        'compute:create': 'rule:admin_or_owner'
+        'compute:create:attach_network':
+  cinder:
+    controller:
+      policy:
+        'volume:delete': 'rule:admin_or_owner'
+        'volume:extend':
+  neutron:
+    server:
+      policy:
+        create_subnet: 'rule:admin_or_network_owner'
+        'get_network:queue_id': 'rule:admin_only'
+        'create_network:shared':
+  glance:
+    server:
+      policy:
+        publicize_image: "role:admin"
+        add_member:
+  keystone:
+    server:
+      policy:
+        admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+  heat:
+    server:
+      policy:
+        context_is_admin: 'role:admin and is_admin_project:True'
+        deny_stack_user: 'not role:heat_stack_user'
+        deny_everybody: '!'
+        'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+        'cloudformation:DescribeStackResources':
+  ceilometer:
+    server:
+      policy:
+        segregation: 'rule:context_is_admin'
+        'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
new file mode 100644
index 0000000..38c0742
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -0,0 +1,45 @@
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
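+# The 'Hack' steps below remove the pre-assigned control-network addresses from ens4 so that the deployment can reconfigure the interface addressing itself.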
+- description: Hack gtw node
+  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp01 node
+  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp02 node
+  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index fbc6a00..3e5f7fb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
 - description: Configure docker service
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
new file mode 100644
index 0000000..8b6c716
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -0,0 +1,517 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
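+    # Address offsets (e.g. '+101') are relative to the pool's network address; the dhcp range '[+90, -10]' runs from the 90th address to 10 addresses before the end of the network (devops address-pool convention).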
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index b81cef5..3509982 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -375,6 +375,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Restart cinder volume
   cmd: |
     salt -C 'I@cinder:controller' service.restart cinder-volume;
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
index 3ee578d..76eb198 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -361,6 +361,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index 24a6c8b..c9ad78f 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -12,6 +12,16 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
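+# NOTE: the salt formulas are reinstalled so that model generation uses the formula versions from the configured package repositories.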
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index 4da1add..9f6b678 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -12,6 +12,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
index cc7603c..0dac293 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
@@ -29,6 +29,15 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
index d1693c1..f09392b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
@@ -13,6 +13,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
index d404b3a..83a433c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
@@ -10,6 +10,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index 2e3cfde..c49f52d 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -13,6 +13,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 34857bc..b2f2324 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -13,6 +13,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
index 72cad63..7f33d82 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -10,6 +10,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
@@ -30,6 +39,10 @@
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
+    # set wider cpu mask for DPDK
+    salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='0xF' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='0xF' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index 3cab167..0e607dc 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -10,6 +10,15 @@
 
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+  cmd: |
+    set -e;
+    apt-get install -y salt-formula-*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
 
 {{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index d07e03f..7bd7a02 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -19,10 +19,14 @@
 
   bootcmd:
    # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   - cloud-init-per once sudo ifdown ens4
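+   # Keeping ens4 down replaces the iptables-based SSH block above; the interface is brought back up at the end of runcmd once the node is prepared.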
+
    # Enable root access
-   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
-   - service sshd restart
+   - cloud-init-per once sudo sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - cloud-init-per once sudo service sshd restart
+
   output:
     all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
 
@@ -32,6 +35,16 @@
    - sudo ifup ens3
    #- sudo route add default gw {gateway} {interface_name}
 
+   # Purge the unattended-upgrades package (Workaround for PROD-17904, PROD-18736)
+   - echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
+   - echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
+   - echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
+   - apt-get -y purge unattended-upgrades
+   # Stop currently running apt-daily service, source: https://unix.stackexchange.com/a/315517
+   - systemctl stop apt-daily.service
+   - systemctl kill --kill-who=all apt-daily.service
+   - while ! (systemctl list-units --all apt-daily.service | fgrep -q dead); do sleep 1; done
+
    # Configure dhclient
    - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
    - sudo resolvconf -u
@@ -64,10 +77,12 @@
 
    ########################################################
    # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   - sudo ifup ens4
    ########################################################
 
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
@@ -79,6 +94,8 @@
      content: |
           auto ens3
           iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
 
    - path: /root/.ssh/config
      owner: root:root
diff --git a/tcp_tests/templates/cookied-model-generator/underlay.yaml b/tcp_tests/templates/cookied-model-generator/underlay.yaml
index 25fb76c..4783bd8 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay.yaml
@@ -29,6 +29,16 @@
           ip_ranges:
             dhcp: [+90, -10]
 
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+          ip_ranges:
+            dhcp: [+90, -10]
+
     groups:
       - name: default
         driver:
@@ -46,11 +56,18 @@
           admin: admin-pool01
 
         l2_network_devices:
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
           admin:
             address_pool: admin-pool01
             dhcp: true
             forward:
-              mode: nat
+              mode: route
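+              # outbound NAT for cfg01 is now provided by the 'external' network above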
 
         nodes:
           - name: {{ HOSTNAME_CFG01 }}
@@ -78,9 +94,15 @@
 
               interfaces:
                 - label: ens3
+                  l2_network_device: external
+                  interface_model: *interface_model
+                - label: ens4
                   l2_network_device: admin
                   interface_model: *interface_model
               network_config:
                 ens3:
                   networks:
+                    - external
+                ens4:
+                  networks:
                     - admin
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
index 8802687..043b74a 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
@@ -373,6 +373,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 95f207a..f2feef4 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -62,7 +62,8 @@
     echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu.list
     echo "{{ UBUNTU_UPDATES_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_updates.list
     echo "{{ UBUNTU_SECURITY_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_security.list
-    eatmydata apt-get clean && apt-get update;
+    eatmydata apt-get clean;
+    apt-get update;
     sync;
   node_name: {{ NODE_NAME }}
   retry: {count: 1, delay: 5}
@@ -497,7 +498,16 @@
 
 {%- macro MACRO_INSTALL_SALT_MINIONS() %}
 {#######################################}
-{% for ssh in config.underlay.ssh %}
+{%- for ssh in config.underlay.ssh %}
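+  {#- Collect only the roles listed in config.salt_deploy.salt_roles; nodes without any matching role get an SSH connectivity check below instead of a salt-minion. #}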
+  {%- set salt_roles = [] %}
+  {%- for role in ssh['roles'] %}
+    {%- if role in config.salt_deploy.salt_roles %}
+      {%- set _ = salt_roles.append(role) %}
+    {%- endif %}
+  {%- endfor %}
+
+  {%- if salt_roles %}
 - description: Configure salt-minion on {{ ssh['node_name'] }}
   cmd: |
     [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
@@ -530,8 +539,15 @@
   node_name: {{ ssh['node_name'] }}
   retry: {count: 1, delay: 1}
   skip_fail: false
-{% endfor %}
+  {%- else %}
+- description: Check SSH connectivity to non-salt-minion node {{ ssh['node_name'] }}
+  cmd: echo "SSH to $(hostname -f) passed"
+  node_name: {{ ssh['node_name'] }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+  {%- endif %}
 
+{%- endfor %}
 
 - description: Accept salt keys from all the nodes
   cmd: salt-key -A -y
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
index 1783d6d..827b16a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
@@ -259,6 +259,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index ed0ee59..f823c2f 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -375,6 +375,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Restart cinder volume
   cmd: |
     salt -C 'I@cinder:controller' service.restart cinder-volume;
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
index f35c749..4fbecca 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
@@ -259,6 +259,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index a82a5f6..613814a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -361,6 +361,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 28c410b..c5f8f3c 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -110,14 +110,24 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{% for ssh in config.underlay.ssh %}
+{%- for ssh in config.underlay.ssh %}
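+  {#- Restart the minion only on nodes that actually have salt roles assigned. #}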
+  {%- set salt_roles = [] %}
+  {%- for role in ssh['roles'] %}
+    {%- if role in config.salt_deploy.salt_roles %}
+      {%- set _ = salt_roles.append(role) %}
+    {%- endif %}
+  {%- endfor %}
+
+  {%- if salt_roles %}
 - description: Restart salt-minion as workaround of PROD-16970
   cmd: |
     service salt-minion restart;  # For case if salt-minion was already installed
   node_name: {{ ssh['node_name'] }}
   retry: {count: 1, delay: 1}
   skip_fail: false
-{% endfor %}
+  {%- endif %}
+{%- endfor %}
 
 - description: Connect ceph to glance
   cmd: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index f4903a9..c5f0593 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -259,6 +259,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index 5d573d7..5fdf941 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -381,6 +381,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Restart cinder volume
   cmd: |
     salt -C 'I@cinder:controller' service.restart cinder-volume;
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
index d479f1c..d1c8656 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
@@ -20,9 +20,6 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
@@ -43,9 +40,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -63,9 +57,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -83,9 +74,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -103,9 +91,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -304,84 +289,6 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 6144
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
           - name: {{ HOSTNAME_PRX01 }}
             role: salt_minion
             params:
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index 45ededb..8276b67 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -375,6 +375,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: Restart cinder volume
   cmd: |
     salt -C 'I@cinder:controller' service.restart cinder-volume;
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index 317021a..bcbcad0 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -113,14 +113,23 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-{% for ssh in config.underlay.ssh %}
+{%- for ssh in config.underlay.ssh %}
+  {%- set salt_roles = [] %}
+  {%- for role in ssh['roles'] %}
+    {%- if role in config.salt_deploy.salt_roles %}
+      {%- set _ = salt_roles.append(role) %}
+    {%- endif %}
+  {%- endfor %}
+
+  {%- if salt_roles %}
 - description: Restart salt-minion as workaround of PROD-16970
   cmd: |
     service salt-minion restart;  # For case if salt-minion was already installed
   node_name: {{ ssh['node_name'] }}
   retry: {count: 1, delay: 1}
   skip_fail: false
-{% endfor %}
+  {%- endif %}
+{%- endfor %}
 
 - description: Connect ceph to glance
   cmd: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index 9d7dbf4..34692c7 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -259,6 +259,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 602325f..6bf534a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -409,6 +409,12 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy
+  cmd: iptables --policy FORWARD ACCEPT
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
index 9c2b134..fe35460 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
@@ -281,6 +281,14 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy on gtw
+  cmd: |
+    set -e;
+    iptables --policy FORWARD ACCEPT;
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index 738463b..19e74bd 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "docker"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "docker" "kibana"') }}
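+# "kibana" is appended to FORMULA_SERVICES, presumably so the Kibana formula
+# is available alongside "elasticsearch" and "fluentd" for the logging UI.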
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 0575c58..1e5d62e 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -344,6 +344,14 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
+- description: Enable forward policy on gtw
+  cmd: |
+    set -e;
+    iptables --policy FORWARD ACCEPT;
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 64288ab..080cb4d 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -25,7 +25,7 @@
 from tcp_tests.fixtures.decapod_fixtures import *  # noqa
 from tcp_tests.fixtures.stacklight_fixtures import *  # noqa
 from tcp_tests.fixtures.k8s_fixtures import *  # noqa
-
+from tcp_tests.fixtures.drivetrain_fixtures import *  # noqa
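+# Fixtures become visible to the tests through these star imports; the
+# sorted __all__ list below appears to be kept for documentation only.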
 
 __all__ = sorted([  # sort for documentation
     # common_fixtures
@@ -53,6 +53,9 @@
     'oss_actions',
     'oss_deployed',
     'oss_sl_os_deployed',
+    # drivetrain_fixtures
+    'drivetrain_actions',
+    'drivetrain_deployed',
     # decapod_fixtures
     'decapod_actions',
     'decapod_deployed',
diff --git a/tcp_tests/tests/system/test_failover_openstack_services.py b/tcp_tests/tests/system/test_failover_openstack_services.py
index 37cff72..08a928b 100644
--- a/tcp_tests/tests/system/test_failover_openstack_services.py
+++ b/tcp_tests/tests/system/test_failover_openstack_services.py
@@ -95,7 +95,7 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_restart_keepalived(self, func_name, underlay, config,
-                                openstack_deployed,  sl_os_deployed,
+                                openstack_deployed,
                                 common_services_actions,
                                 salt_actions, openstack_actions,
                                 rally, show_step):
@@ -164,7 +164,7 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_stop_keepalived(self, func_name, underlay, config,
-                             openstack_deployed,  sl_os_deployed,
+                             openstack_deployed,
                              common_services_actions,
                              salt_actions, openstack_actions,
                              rally, show_step):
@@ -251,7 +251,7 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_kill_keepalived(self, func_name, underlay, config,
-                             openstack_deployed,  sl_os_deployed,
+                             openstack_deployed,
                              common_services_actions,
                              salt_actions, openstack_actions,
                              rally, show_step):
@@ -430,7 +430,7 @@
     @pytest.mark.fail_snapshot
     @pytest.mark.with_rally(rally_node="gtw01.", prepare_openstack=True)
     def test_kill_rabbit_galera(self, func_name, underlay, config,
-                                openstack_deployed, sl_os_deployed,
+                                openstack_deployed,
                                 common_services_actions,
                                 salt_actions, openstack_actions,
                                 rally, show_step):
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index bc0a70d..30dcba1 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -61,3 +61,19 @@
         show_step(5)
         dns_host = k8s_deployed.get_svc_ip('coredns')
         k8s_deployed.nslookup(hostname, dns_host)
+
+    @pytest.mark.grab_versions
+    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
+                                      'cncf_results.tar.gz'])
+    @pytest.mark.fail_snapshot
+    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
+                                    cncf_log_helper):
+        """Run cncf e2e suite and provide files needed for pull request
+        to the CNCF repo
+
+        Scenario:
+        1. Run cncf from https://github.com/cncf/k8s-conformance
+        """
+
+        show_step(1)
+        k8s_deployed.start_k8s_cncf_verification()
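+        # start_k8s_cncf_verification() is expected to launch the CNCF
+        # conformance (e2e) run on the deployed cluster; the artifacts
+        # listed in the cncf_publisher mark above are then collected by
+        # the cncf_log_helper fixture.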