Merge "Add installation of DNSaaS component to virtual-mcp-sl-os template"
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 58c8509..9bd24b3 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -35,7 +35,7 @@
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.salt_deployed)
 @pytest.fixture(scope='function')
 def salt_deployed(revert_snapshot, request, config,
-                  hardware, underlay, salt_actions, grab_versions, snapshot):
+                  hardware, underlay, salt_actions, snapshot, grab_versions):
     """Fixture to get or install salt service on environment
 
     :param revert_snapshot: fixture that reverts snapshot that is specified
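
Note on the parameter reorder above: pytest instantiates fixtures left to right and runs their finalizers in reverse order, so requesting `snapshot` before `grab_versions` lets the log-grabbing finalizer fire before the snapshot teardown. A minimal standalone sketch of that ordering (illustration only, not code from this repo):

    import pytest

    order = []

    @pytest.fixture
    def snapshot(request):
        request.addfinalizer(lambda: order.append('snapshot'))

    @pytest.fixture
    def grab_versions(request):
        request.addfinalizer(lambda: order.append('grab_versions'))

    def test_teardown_order(snapshot, grab_versions):
        pass  # on teardown: 'grab_versions' is appended first, then 'snapshot'
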
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 7502df2..d991548 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -211,10 +211,17 @@
     grab_version = request.keywords.get('grab_versions', None)
 
     def test_fin():
-        if hasattr(request.node, 'rep_call') and \
-                (request.node.rep_call.passed or request.node.rep_call.failed)\
-                and grab_version:
+        fixture_failed = (hasattr(request.node, 'rep_setup') and
+                          request.node.rep_setup.failed)
+        test_passed = (hasattr(request.node, 'rep_call') and
+                       request.node.rep_call.passed)
+        test_failed = (hasattr(request.node, 'rep_call') and
+                       request.node.rep_call.failed)
+
+        if fixture_failed or test_passed or test_failed:
             artifact_name = utils.extract_name_from_mark(grab_version) or \
                 "{}".format(func_name)
             underlay.get_logs(artifact_name)
-    request.addfinalizer(test_fin)
+
+    if grab_version:
+        request.addfinalizer(test_fin)
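
Two behavioral changes here: logs are now also collected when fixture setup fails (`rep_setup.failed`), not only after the test body ran, and the finalizer is registered at all only when the test carries the `grab_versions` mark. The `rep_setup`/`rep_call` attributes are not set by pytest itself; they come from a report hook. A sketch of the standard pytest recipe (the hook in this repo's conftest may differ in details):

    # conftest.py
    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        rep = outcome.get_result()
        # Attaches item.rep_setup / item.rep_call / item.rep_teardown --
        # exactly the attributes test_fin() inspects.
        setattr(item, 'rep_' + rep.when, rep)
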
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 5194239..5116300 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -414,50 +414,84 @@
 
     def get_logs(self, artifact_name,
                  node_role=ext.UNDERLAY_NODE_ROLES.salt_master):
-        master_node = [ssh for ssh in self.config_ssh
-                       if node_role in ssh['roles']][0]
-        cmd = ("dpkg -l | grep formula > "
-               "/var/log/{0}_packages.output".format(master_node['node_name']))
+        dump_commands = (
+            "mkdir /root/$(hostname -f)/;"
+            "rsync -aruv /var/log/ /root/$(hostname -f)/;"
+            "dpkg -l > /root/$(hostname -f)/dump_dpkg_l.txt;"
+            "df -h > /root/$(hostname -f)/dump_df.txt;"
+            "mount > /root/$(hostname -f)/dump_mount.txt;"
+            "blkid -o list > /root/$(hostname -f)/dump_blkid_o_list.txt;"
+            "iptables -t nat -S > /root/$(hostname -f)/dump_iptables_nat.txt;"
+            "iptables -S > /root/$(hostname -f)/dump_iptables.txt;"
+            "ps auxwwf > /root/$(hostname -f)/dump_ps.txt;"
+            "docker images > /root/$(hostname -f)/dump_docker_images.txt;"
+            "docker ps > /root/$(hostname -f)/dump_docker_ps.txt;"
+            "vgdisplay > /root/$(hostname -f)/dump_vgdisplay.txt;"
+            "lvdisplay > /root/$(hostname -f)/dump_lvdisplay.txt;"
+            "ip a > /root/$(hostname -f)/dump_ip_a.txt;"
+            "ip r > /root/$(hostname -f)/dump_ip_r.txt;"
+            "netstat -anp > /root/$(hostname -f)/dump_netstat.txt;"
+            "brctl show > /root/$(hostname -f)/dump_brctl_show.txt;"
+            "arp -an > /root/$(hostname -f)/dump_arp.txt;"
+            "uname -a > /root/$(hostname -f)/dump_uname_a.txt;"
+            "lsmod > /root/$(hostname -f)/dump_lsmod.txt;"
+            "cat /proc/interrupts > /root/$(hostname -f)/dump_interrupts.txt;"
+            "cat /etc/*-release > /root/$(hostname -f)/dump_release.txt;"
+            # OpenStack specific, will fail on other nodes
+            # "rabbitmqctl report > /root/$(hostname -f)/dump_rabbitmqctl.txt;"
 
-        tar_cmd = ('tar --absolute-names'
-                   ' --warning=no-file-changed '
-                   '-czf {t} {d}'.format(
-                       t='{0}_log.tar.gz'.format(artifact_name), d='/var/log'))
-        minion_nodes = [ssh for ssh in self.config_ssh
-                        if node_role not in ssh['roles']]
+            # "ceph health > /root/$(hostname -f)/dump_ceph_health.txt;"
+            # "ceph -s > /root/$(hostname -f)/dump_ceph_s.txt;"
+            # "ceph osd tree > /root/$(hostname -f)/dump_ceph_osd_tree.txt;"
 
-        with self.remote(master_node['node_name']) as r:
-            for node in minion_nodes:
-                LOG.info("Archiving logs on the node {0}"
+            # "for ns in $(ip netns list);"
+            # " do echo Namespace: ${ns}; ip netns exec ${ns} ip a;"
+            # "done > /root/$(hostname -f)/dump_ip_a_ns.txt;"
+
+            # "for ns in $(ip netns list);"
+            # " do echo Namespace: ${ns}; ip netns exec ${ns} ip r;"
+            # "done > /root/$(hostname -f)/dump_ip_r_ns.txt;"
+
+            # "for ns in $(ip netns list);"
+            # " do echo Namespace: ${ns}; ip netns exec ${ns} netstat -anp;"
+            # "done > /root/$(hostname -f)/dump_netstat_ns.txt;"
+
+            "/usr/bin/haproxy-status.sh > "
+            "  /root/$(hostname -f)/dump_haproxy.txt;"
+
+            # Archive the files
+            "cd /root/; tar --absolute-names --warning=no-file-changed "
+            "  -czf $(hostname -f).tar.gz ./$(hostname -f)/;"
+        )
+
+        master_host = self.__config.salt.salt_master_host
+        with self.remote(host=master_host) as master:
+            # dump files
+            LOG.info("Archive artifacts on all nodes")
+            master.check_call("salt '*' cmd.run '{0}'".format(dump_commands),
+                              raise_on_err=False)
+
+            # create target dir for archives
+            master.check_call("mkdir /root/dump/")
+
+            # get archived artifacts to the master node
+            for node in self.config_ssh:
+                LOG.info("Getting archived artifacts from the node {0}"
                          .format(node['node_name']))
-                r.check_call((
-                    "salt '{n}*' cmd.run "
-                    "'tar "
-                    "--absolute-names "
-                    "--warning=no-file-changed "
-                    "-czf {t} {d}'".format(
-                        n=node['node_name'],
-                        t='{0}.tar.gz'.format(node['node_name']),
-                        d='/var/log')),
-                        raise_on_err=False)
+                master.check_call("rsync -aruv {0}:/root/*.tar.gz "
+                                  "/root/dump/".format(node['node_name']),
+                                  raise_on_err=False)
 
-                LOG.info("Copying logs from {0} to {1}"
-                         .format(node['node_name'], master_node['node_name']))
-                packages_minion_cmd = ("salt '{0}*' cmd.run "
-                                       "'dpkg -l' > /var/log/"
-                                       "{0}_packages.output".format(
-                                           node['node_name']))
-                r.check_call(packages_minion_cmd)
-                r.check_call("rsync {0}:/root/*.tar.gz "
-                             "/var/log/".format(node['node_name']),
-                             raise_on_err=False)
+            destination_name = '/root/{0}_dump.tar.gz'.format(artifact_name)
+            # Archive the artifacts from all nodes
+            master.check_call(
+                'cd /root/dump/;'
+                'tar --absolute-names --warning=no-file-changed -czf '
+                ' {0} ./'.format(destination_name))
 
-            r.check_call(cmd)
-            r.check_call(tar_cmd)
-
-            destination_name = '{0}_log.tar.gz'.format(artifact_name)
+            # Download the artifact to the host
             LOG.info("Downloading the artifact {0}".format(destination_name))
-            r.download(destination=destination_name, target=os.getcwd())
+            master.download(destination=destination_name, target=os.getcwd())
 
     def delayed_call(
             self, cmd,
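
The rewritten get_logs() fans a single shell pipeline out to every minion through `salt '*' cmd.run`, pulls each node's tarball to the salt master with rsync, re-archives everything, and downloads one combined artifact to the test host. A hedged usage sketch (the artifact name is hypothetical; `underlay` is an UnderlaySSHManager instance from this module):

    # Collect diagnostics from every node after a failed step.
    underlay.get_logs(artifact_name='deploy_failure')
    # Builds /root/deploy_failure_dump.tar.gz on the salt master from each
    # node's /root/$(hostname -f).tar.gz, then downloads it into the
    # current working directory on the test host.
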
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index b06b805..a5cda3b 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,5 +1,6 @@
 # git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre]   # Use this requirement for PostgreSQL
-git+git://github.com/openstack/fuel-devops.git@c39a2a9   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+libvirt-python>=3.5.0, !=4.1.0  # LGPLv2+
+git+git://github.com/openstack/fuel-devops.git@cce44f4   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
 git+git://github.com/dis-xcom/fuel-devops-driver-ironic
 paramiko
 six
@@ -15,7 +16,7 @@
 testrail
 functools32
 python-k8sclient==0.4.0
-salt-pepper
+salt-pepper<=0.5.3
 setuptools<=36.2.0
 netaddr
 mock>=1.2
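
`salt-pepper` is capped at 0.5.3, presumably because later releases broke compatibility with the suite. For context, the pepper client drives salt-api roughly like this (the URL and credentials below are made-up placeholders):

    from pepper import Pepper

    api = Pepper('http://127.0.0.1:8000')     # hypothetical salt-api URL
    api.login('saltuser', 'saltpass', 'pam')  # hypothetical credentials
    # Run a module function on all minions through the REST API:
    print(api.low([{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}]))
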
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
index abf1040..4e3bb65 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
@@ -75,7 +75,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
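
`-b 1` (batch size 1) makes salt apply the galera state to one slave at a time, so each node finishes joining the cluster before the next starts; applying the state to all slaves in parallel can race on cluster membership. A rough Python-API equivalent, assuming it runs on the master with a salt 2017.7+ API (sketch, not code from this repo):

    import salt.client

    local = salt.client.LocalClient()
    # batch='1' -> one minion at a time, mirroring `state.sls galera -b 1`.
    for ret in local.cmd_batch('I@galera:slave', 'state.sls', ['galera'],
                               tgt_type='compound', batch='1'):
        print(ret)
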
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 74a4666..0b75ccd 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -109,6 +109,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_master_address: 10.167.4.2
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.2
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index b875e47..7435fc8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -93,6 +93,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -102,6 +103,7 @@
       reclass_storage_name: openstack_proxy_node02
       roles:
       - openstack_proxy
+      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
index ed3636c..3c4ce05 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
@@ -1,22 +1,6 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Install docker swarm.
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
-  cmd: |
-    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+  cmd: |
+    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a1d6090..ab28e9b 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -113,7 +113,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
index 5a4fe25..2a08d8a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
@@ -106,7 +106,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
index 265d897..2f3105a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -120,6 +120,35 @@
  public_host: ${_param:openstack_proxy_address}
  publication_method: email
  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
  salt_master_address: 10.167.8.66
  salt_master_hostname: cfg01
  salt_master_management_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index c91448f..cbb8365 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -122,6 +122,35 @@
  public_host: ${_param:openstack_proxy_address}
  publication_method: email
  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
  salt_master_address: 10.167.8.66
  salt_master_hostname: cfg01
  salt_master_management_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 8d131b7..37c8e0c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -1,22 +1,6 @@
 {% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Install docker swarm.
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
-  cmd: |
-    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+  cmd: |
+    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
index 2d0a783..7b3d93c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -126,7 +126,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
index 3a2ada7..39827ae 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
@@ -152,6 +152,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: KEfAXxIWJykUBv0v8uKWdI2v4cBG5A07
   salt_api_password_hash: $6$XBCrfheG$2q48l7h1giiqF2sdp7CFtLQQi8pcMa6K5A8cPYQmhuGqJtzv08YXVqYyhkHARzl1VBLVf.aTMY6d0M5naM5WU0
   salt_master_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
index 225c6f0..328b70c 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
@@ -220,6 +220,35 @@
   rundeck_iframe_host: ${_param:openstack_proxy_address}
   rundeck_iframe_port: ${_param:haproxy_rundeck_exposed_port}
   rundeck_iframe_ssl: 'False'
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI
   salt_api_password_hash: $6$hbWiURAY$Sfzk6dzos6j1B8gFDK6WoGNDk0I2Bd2IOarWDGOflgY2sBpUJ4KTq1Uw241ri933/ROHTSuhNcodmDe13i5gS.
   salt_master_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
index c6314ad..8faced7 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
   expire: False

  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -46,39 +44,7 @@
   - mkswap /swapfile
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
-
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - apt-get update
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################

  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
index c3ffe17..1837e32 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
   expire: False

  bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -38,7 +36,6 @@

   # Prepare network connection
   - sudo ifup {interface_name}
-   #- sudo route add default gw {gateway} {interface_name}

   # Create swap
   - fallocate -l 4G /swapfile
@@ -46,43 +43,7 @@
   - mkswap /swapfile
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
-
-
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   - echo "Preparing base OS"
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
-   # Configure Ubuntu mirrors
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   - apt-get clean
-   - eatmydata apt-get update && apt-get -y upgrade
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   # Install latest kernel
-   - eatmydata apt-get install -y linux-generic-hwe-16.04
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - reboot
-   ########################################################

  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
index bff4542..90c6227 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -135,7 +135,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
index 7c247b7..9929c03 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
@@ -82,7 +82,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
index f26d7ee..4d8aa14 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -2,29 +2,6 @@
 
 # Install OSS: Operational Support System Tools
 
-# Keepalived
-#-----------
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the CICD VIP
-  cmd: |
-    CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
-    echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 # Glusterfs
 #-----------
 
@@ -110,6 +87,29 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+# Keepalived
+#-----------
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the CICD VIP
+  cmd: |
+    CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
+    echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Configure OSS services
 #-----------------------
 
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
index 8022d47..fbf40dd 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
@@ -127,6 +127,35 @@
   openstack_version: ocata
   public_host: ${_param:openstack_proxy_address}
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_master_address: 10.167.4.15
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
index 648984a..a17173d 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
@@ -96,6 +96,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -107,6 +108,7 @@
       reclass_storage_name: openstack_proxy_node02
       roles:
       - openstack_proxy
+      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
index 962035c..c351023 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
@@ -2,22 +2,6 @@
 {% from 'cookied-mcp-ocata-dop-sl2/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
 
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
-  cmd: |
-    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
-    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -66,6 +50,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+  cmd: |
+    SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+    echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
index 126a441..fbbda4b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -234,7 +234,7 @@
             role: salt_master
             params:
               vcpu: {{ os_env('CFG_NODE_CPU', 2) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -641,8 +641,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: {{ os_env('GTW_NODE_CPU', 1) }}
-              memory: {{ os_env('GTW_NODE_MEMORY', 2048) }}
+              vcpu: {{ os_env('GTW_NODE_CPU', 4) }}
+              memory: {{ os_env('GTW_NODE_MEMORY', 4096) }}
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
index 74d3c68..6b26974 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
@@ -82,7 +82,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 315f8cc..73c0222 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -101,6 +101,35 @@
   openstack_version: ocata
   public_host: ${_param:openstack_proxy_address}
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_master_address: 10.167.4.15
   salt_master_hostname: cfg01
   salt_master_management_address: 10.167.5.15
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
index 9de891d..e60baf8 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
@@ -186,6 +186,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -197,6 +198,7 @@
       reclass_storage_name: openstack_proxy_node02
       roles:
       - openstack_proxy
+      - features_designate_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
index dd64b90..90c52fb 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
@@ -1,22 +1,6 @@
 {% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index 8c4c848..cffa424 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -350,7 +350,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..6b9ab13
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -0,0 +1,156 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: cookied-mcp-pike-dpdk.local
+  cluster_name: cookied-mcp-pike-dpdk
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: ${_param:openstack_control_node01_address}
+#  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: ${_param:openstack_control_node01_hostname}
+  infra_kvm02_control_address: ${_param:openstack_control_node02_address}
+#  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: ${_param:openstack_control_node02_hostname}
+  infra_kvm03_control_address: ${_param:openstack_control_node03_address}
+#  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: ${_param:openstack_control_node03_hostname}
+  infra_kvm_vip_address: ${_param:openstack_control_address}
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.4.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.4.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.4.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 10.167.4.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.4.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.4.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.4.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'True'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_hugepages_count: '600'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_cpu_pinning: '3'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_branch: master
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
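+# Note (explanatory comment, not a generated key): the DPDK-specific settings
+# in this context are openstack_nfv_dpdk_enabled: 'True',
+# openstack_nova_compute_hugepages_count and openstack_nova_cpu_pinning;
+# stacklight_enabled is 'False', so no mon/log/mtr nodes are generated.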
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index 63cedf1..bd2275c 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.mcp11-ovs-dpdk.local:
+    cfg01.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,17 +10,11 @@
         ens4:
           role: single_ctl
 
-    ctl01.mcp11-ovs-dpdk.local:
+    ctl01.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
-      - openstack_database_leader
-      - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
-      - features_designate_bind9_keystone
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -28,16 +22,11 @@
         ens4:
           role: single_ctl
 
-    ctl02.mcp11-ovs-dpdk.local:
+    ctl02.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,15 +34,11 @@
         ens4:
           role: single_ctl
 
-    ctl03.mcp11-ovs-dpdk.local:
+    ctl03.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -61,7 +46,73 @@
         ens4:
           role: single_ctl
 
-    prx01.mcp11-ovs-dpdk.local:
+    dbs01.cookied-mcp-pike-dpdk.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs02.cookied-mcp-pike-dpdk.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs03.cookied-mcp-pike-dpdk.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg01.cookied-mcp-pike-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg02.cookied-mcp-pike-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg03.cookied-mcp-pike-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
@@ -72,47 +123,8 @@
         ens4:
           role: single_ctl
 
-    mon01.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - stacklight_telemetry_leader
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
     # Generator-based computes. For compatibility only
-    cmp<<count>>.mcp11-ovs-dpdk.local:
+    cmp<<count>>.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
@@ -127,7 +139,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.mcp11-ovs-dpdk.local:
+    gtw01.cookied-mcp-pike-dpdk.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..050c4c4
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
@@ -0,0 +1,37 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Start compute node addresses from .105, as in static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
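+# Note (explanatory): 'reclass-tools del-key' removes the node definitions
+# that the generated infra/config.yml would otherwise create for roles which
+# are combined onto the ctl nodes in this lab (infra_kvm) or absent from it
+# (stacklight_log), matching the sed workarounds above.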
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml
new file mode 100644
index 0000000..9264ed5
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
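+# Note: the check above resolves the expected VIP from pillar and greps
+# 'ip a' on all keepalived cluster members, so the deploy stops here if no
+# node is actually holding the address.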
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
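+# Note: '-b 1' is salt's batch-size option (run on one minion at a time);
+# it is used for the glusterfs setup and status steps, presumably so that
+# peers probe and report sequentially rather than concurrently.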
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
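+# Note: wsrep_cluster_size / wsrep_incoming_addresses in mysql.status show
+# whether every Galera member has joined; the step is diagnostic only, hence
+# skip_fail: true.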
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
new file mode 100644
index 0000000..0311e79
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
@@ -0,0 +1,277 @@
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Configure cinder-volume salt-call
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 01
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 02
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 03
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
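+# Note: the fdisk/pvcreate sequence above prepares /dev/vdb (the extra 50 GB
+# 'cinder' volume defined for the ctl nodes in underlay.yaml) as an LVM
+# physical volume for the cinder-volume LVM backend.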
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
new file mode 100644
index 0000000..ead4b3d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
@@ -0,0 +1,49 @@
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+- description: Enable hugepages on cmp nodes
+  cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
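+# Note: this reserves 2048 x 2 MiB hugepages (~4 GiB) on each compute up
+# front, since OVS-DPDK allocates its packet buffers from hugepage memory.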
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Hack gtw node
+  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp01 node
+  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp02 node
+  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
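+# Note (explanatory): the 'Hack ...' steps drop the DHCP-assigned addresses
+# (.110/.105/.106) from ens4, presumably to avoid conflicts with the
+# statically modeled control-plane addresses applied during bootstrap.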
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
new file mode 100644
index 0000000..8e57462
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
@@ -0,0 +1,628 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dpdk') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
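+            # Note: values are relative to the generated subnet (devops
+            # address-pool syntax): '+N' counts from the network address and
+            # '-N' from the end of the range, so this dhcp pool starts at
+            # .90 and stops 10 addresses short of the subnet's end.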
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: True
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for the 'backing_store' option of node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for storing the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 12
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
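The numa block on this compute splits its 12 vCPUs into two guest NUMA cells of six CPUs and 4096 MB each, matching the 8192 MB total, so NUMA-aware placement can be exercised on the virtual computes. A quick in-guest sanity check, assuming the numactl package is installed (output illustrative):

    numactl --hardware
    # available: 2 nodes (0-1)
    # node 0 cpus: 0 1 2 3 4 5
    # node 1 cpus: 6 7 8 9 10 11
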
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 12
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              numa:
+              - cpus: 0,1,2,3,4,5
+                memory: 4096
+              - cpus: 6,7,8,9,10,11
+                memory: 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
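Two conventions recur throughout these underlay templates. The custom !os_env tag reads an environment variable and falls back to the literal after the comma when that variable is unset, and YAML anchors (&interfaces, &all_interfaces, &network_config) define a block once on the first node so later nodes reuse it by alias. A minimal sketch of both patterns, using the names from the template above:

    vcpu: !os_env SLAVE_NODE_CPU, 2    # take $SLAVE_NODE_CPU, default to 2
    interfaces: &interfaces            # define the interface list once...
      - label: ens3
        l2_network_device: admin
    # ...then on any later node:
    interfaces: *interfaces            # ...reuse it by alias
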
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
similarity index 72%
rename from tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index e2ba165..d546308 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -2,8 +2,8 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-pike-dvr.local
-  cluster_name: virtual-mcp-pike-dvr
+  cluster_domain: cookied-mcp-pike-dvr.local
+  cluster_name: cookied-mcp-pike-dvr
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -47,7 +47,7 @@
   openstack_benchmark_node01_address: 172.16.10.95
   openstack_benchmark_node01_hostname: bmk01
   openstack_cluster_size: compact
-  openstack_compute_count: '100'
+  openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -111,6 +111,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
   salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
   salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 0127547..ca8114b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -63,6 +63,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_pool_manager_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml
new file mode 100644
index 0000000..31a6257
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
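The VIP check reads the cluster address from pillar and greps for it in 'ip a' on every keepalived node, failing the run if no node holds the address. A roughly equivalent manual check from the Salt master (the address is illustrative):

    salt -C 'I@keepalived:cluster' cmd.run "ip a | grep 172.16.10.100"
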
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
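Galera is deliberately bootstrapped in two passes: the master first, then the slaves with '-b 1' so they join one at a time. On a healthy three-node cluster the status check above would report something like (values illustrative):

    wsrep_cluster_size: 3
    wsrep_incoming_addresses: 172.16.10.101:3306,172.16.10.102:3306,172.16.10.103:3306
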
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
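
Every step in these deploy templates follows the same schema; the field meanings below are inferred from how the steps above use them:

    - description: <human-readable step name>
      cmd: <shell command to execute>
      node_name: <node to run it on, usually the Salt master>
      retry: {count: <attempts>, delay: <seconds between attempts>}
      skip_fail: <true = best-effort step, false = abort the run on failure>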
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
new file mode 100644
index 0000000..b81cef5
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -0,0 +1,395 @@
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: overrides-policy.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+  cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+  cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+    ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
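When OVERRIDE_POLICY is set, the block above keeps the original control.yml as control_orig.yml and symlinks in a generated class that includes cluster.<name>.openstack.control_orig plus the uploaded overrides-policy.yml. Any non-empty value enables this path; for example (value illustrative):

    export OVERRIDE_POLICY=true
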
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install Designate
+- description: Install powerdns
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@powerdns:server' state.sls powerdns.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install designate
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@designate:server' state.sls designate -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
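Designate is deployed with PowerDNS as its backend, which is why powerdns.server runs first; the generous retry (5 attempts, 10 seconds apart) reflects that the designate state may need several passes on a fresh cluster. A possible post-deploy sanity check, assuming the keystonercv3 credentials file used elsewhere in this template:

    salt -C 'I@designate:server' cmd.run '. /root/keystonercv3; openstack dns service list'
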
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
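Taken together, the network steps above build the usual smoke-test topology (prefixes come from IPV4_NET_EXTERNAL_PREFIX and IPV4_NET_TENANT_PREFIX):

    net04_ext (flat on physnet1, DHCP disabled, floating pool .150-.180)
        |
    net04_router01 (gateway set on net04_ext)
        |
    net04 / net04__subnet (tenant network, allocation pool .120-.240)
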
+#- description:  Allow all tcp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+#
+#- description:  Allow all icmp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create LVM physical volume 01
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create LVM physical volume 02
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create LVM physical volume 03
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create volume group
+  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install cinder-volume
+  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install crudini
+  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
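The PROD-13167 workaround above partitions /dev/vdb on each controller, builds an LVM volume group named cinder-volumes on the new partition, installs cinder-volume, and points cinder at the LVM backend via crudini. A quick confirmation pass could look like (illustrative):

    salt 'ctl*' cmd.run 'vgs cinder-volumes; crudini --get /etc/cinder/cinder.conf DEFAULT enabled_backends'
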
+- description: Install docker.io on gtw
+  cmd: salt-call cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Restart cinder volume
+  cmd: |
+    salt -C 'I@cinder:controller' service.restart cinder-volume;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+  nova:
+    controller:
+      policy:
+        context_is_admin: 'role:admin or role:administrator'
+        'compute:create': 'rule:admin_or_owner'
+        'compute:create:attach_network':
+  cinder:
+    controller:
+      policy:
+        'volume:delete': 'rule:admin_or_owner'
+        'volume:extend':
+  neutron:
+    server:
+      policy:
+        create_subnet: 'rule:admin_or_network_owner'
+        'get_network:queue_id': 'rule:admin_only'
+        'create_network:shared':
+  glance:
+    server:
+      policy:
+        publicize_image: "role:admin"
+        add_member:
+  keystone:
+    server:
+      policy:
+        admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+  heat:
+    server:
+      policy:
+        context_is_admin: 'role:admin and is_admin_project:True'
+        deny_stack_user: 'not role:heat_stack_user'
+        deny_everybody: '!'
+        'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+        'cloudformation:DescribeStackResources':
+  ceilometer:
+    server:
+      policy:
+        segregation: 'rule:context_is_admin'
+        'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
new file mode 100644
index 0000000..128691b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
@@ -0,0 +1,45 @@
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Hack gtw node
+  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp01 node
+  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp02 node
+  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
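
The three 'Hack' steps drop the address that the underlay DHCP handed out on ens4 so that the static control-plane address from the reclass model can be applied cleanly by the linux.network state. Whether the re-assignment worked can be spot-checked afterwards (illustrative):

    salt 'gtw01*' cmd.run 'ip -4 addr show dev ens4'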
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
index b3818b7..7a31cee 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
@@ -1,22 +1,6 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
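
Note that {hostname} and {gateway} in these user-data files are not Jinja: they appear to be plain format placeholders that the test framework substitutes when building each node's cloud-init ISO, while the Jinja loop over config.underlay.ssh_keys is rendered earlier, when the template file itself is processed. After substitution a node would receive, for example (values illustrative):

    hostname: cfg01.cookied-mcp-pike-dvr.local
    - sudo echo "nameserver 10.70.0.1" >> /etc/resolvconf/resolv.conf.d/base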
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
new file mode 100644
index 0000000..5cba5b0
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -0,0 +1,578 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
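The address entries above are offsets into whatever subnet the framework carves out of the pool: '10.60.0.0/16:24' means allocate a free /24 from within 10.60.0.0/16. A worked example, assuming the first allocated subnet is 10.60.0.0/24 (the actual subnet depends on what is free on the host):

    gateway: +1            -> 10.60.0.1
    default_...cfg01: +100 -> 10.60.0.100
    dhcp: [+90, -10]       -> from 10.60.0.90 up to 10 addresses short of the
                              top of the range
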
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
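+          # Note: the &all_interfaces and &all_network_config anchors defined
+          # on the node above are reused (via *all_interfaces and
+          # *all_network_config) by the remaining nodes that attach to all
+          # four networks, so the four-NIC layout is declared only once.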
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_DNS01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_DNS02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
similarity index 72%
rename from tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
rename to tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index 8b6e51a..4dd703e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -2,8 +2,8 @@
   bmk_enabled: 'False'
   ceph_enabled: 'False'
   cicd_enabled: 'False'
-  cluster_domain: virtual-mcp-pike-ovs.local
-  cluster_name: virtual-mcp-pike-ovs
+  cluster_domain: cookied-mcp-pike-ovs.local
+  cluster_name: cookied-mcp-pike-ovs
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
   compute_primary_second_nic: eth2
@@ -47,7 +47,7 @@
   openstack_benchmark_node01_address: 172.16.10.95
   openstack_benchmark_node01_hostname: bmk01
   openstack_cluster_size: compact
-  openstack_compute_count: '100'
+  openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -111,6 +111,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
   salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
   salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 63cedf1..3e05cf0 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -65,6 +65,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_bind9_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml
new file mode 100644
index 0000000..89aaf5c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
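+#
+# Each step below follows the same schema: 'description' labels the step,
+# 'cmd' is executed on 'node_name', 'retry' re-runs the command up to
+# 'count' times with 'delay' seconds between attempts, and 'skip_fail'
+# controls whether a persisting failure aborts the deployment run.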
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
new file mode 100644
index 0000000..3ee578d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -0,0 +1,374 @@
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
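+#
+# Targeting notes: '-C' turns on Salt compound matching and 'I@<pillar>'
+# matches minions by pillar data (e.g. 'I@glance:server'); '-b 1' runs the
+# state in batch mode, applying it to one matched minion at a time.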
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install designate
+- description: Install bind
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@bind:server' state.sls bind
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install designate
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@designate:server' state.sls designate -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
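+
+# Note: the bind and designate steps above provide the DNSaaS backend for
+# this template; designate is given 5 retries, presumably to ride out the
+# service settling after the controllers come up.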
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+#- description:  Allow all tcp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+#
+#- description:  Allow all icmp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Configure cinder-volume salt-call
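+#
+# The steps below hand-build the LVM backend for cinder-volume: fdisk is fed
+# a scripted answer sequence (roughly: new primary partition, accept the
+# default bounds, write), after which /dev/vdb1 is turned into an LVM
+# physical volume and the 'cinder-volumes' volume group.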
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create LVM physical volume 01
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create LVM physical volume 02
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create LVM physical volume 03
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create volume_group
+  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install cinder-volume
+  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install crudini
+  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 01
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 02
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 03
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Restart cinder volume
+  cmd: |
+    salt -C 'I@cinder:controller' service.restart cinder-volume;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Install docker.io on gtw
+  cmd: salt-call cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
new file mode 100644
index 0000000..8cc3e21
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
@@ -0,0 +1,45 @@
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
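+# The "Hack" steps below appear to drop the dhcp-assigned control-network
+# address from ens4 on the gtw/cmp nodes so the planned static addresses
+# (.110, .105, .106) can be (re)applied by the deployment without conflict.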
+- description: Hack gtw node
+  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp01 node
+  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp02 node
+  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
rename to tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
index c5a6ffa..87bdad9 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
@@ -1,24 +1,8 @@
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -67,6 +51,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
new file mode 100644
index 0000000..95f3a32
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -0,0 +1,515 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
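+# Note: the entries above are ordinary YAML anchors; they (and the
+# &interfaces / &network_config style anchors defined on the first nodes
+# below) are referenced throughout the nodes section via the *name alias
+# syntax, so each block is declared only once.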
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-ovs') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
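+      # Address values in these pools are offsets into each pool's network:
+      # '+1' is the first host address (used for the gateway and the l2
+      # network device) and '+100'..'+121' are per-node reservations; the
+      # dhcp range '[+90, -10]' presumably runs from offset 90 to 10
+      # addresses before the end of the network.
+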
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_MON01 }}: +107
+            default_{{ HOSTNAME_MON02 }}: +108
+            default_{{ HOSTNAME_MON03 }}: +109
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' value for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
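+        # These group volumes are shared base images: node 'system' volumes
+        # reference them through 'backing_store', which for qcow2 presumably
+        # gives each node a copy-on-write overlay instead of a full copy.
+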
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
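+          # As in the underlay above, the &interfaces and &network_config
+          # anchors declared on ctl01 are reused below via *interfaces and
+          # *network_config for the nodes that only attach to the admin and
+          # private networks.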
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index bb17c15..2e3cfde 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -26,6 +26,9 @@
     sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 6b85fea..34857bc 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -26,6 +26,9 @@
     sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index c823df7..10da5bf 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -4,7 +4,7 @@
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
-{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-dvr' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr' %}
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
 # Path to the context files used to render Cluster and Environment models
@@ -26,6 +26,9 @@
     sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index f117bd8..507d327 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -4,7 +4,7 @@
 {% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
 # Other salt model repository parameters see in shared-salt.yaml
 
-{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-ovs' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-ovs' %}
 # Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
 {% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
 # Path to the context files used to render Cluster and Environment models
@@ -26,6 +26,9 @@
     sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
 
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
     . /root/venv-reclass-tools/bin/activate;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/underlay.yaml b/tcp_tests/templates/cookied-model-generator/underlay.yaml
index e90a8c8..25fb76c 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay.yaml
@@ -57,7 +57,7 @@
             role: salt_master
             params:
               vcpu: {{ os_env('CFG_NODE_CPU', 4) }}
-              memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index 3afdf7f..740068d 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -77,7 +77,8 @@
    {%- endif %}
 
    # Enable SNAT to allow internet access for deploying nodes using ironic node as a gateway
-   - iptables -t nat -A POSTROUTING -s {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} ! -d {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} -j MASQUERADE
+   - iptables -t nat -I POSTROUTING -s {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} ! -d {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} -j MASQUERADE
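+   # Accept forwarded traffic explicitly: the default FORWARD policy may otherwise drop the SNAT'ed packets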
+   - iptables -I FORWARD -j ACCEPT
 
    - echo "Building ironic agent image (stable/newton) ..."
    - apt-get install -y docker.io gzip uuid-runtime cpio findutils grep gnupg make
diff --git a/tcp_tests/templates/ironic_standalone/underlay.yaml b/tcp_tests/templates/ironic_standalone/underlay.yaml
index 8d30156..4837204 100644
--- a/tcp_tests/templates/ironic_standalone/underlay.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay.yaml
@@ -84,7 +84,7 @@
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 150
-                  source_image: !os_env IMAGE_PATH1604
+                  source_image: {{ os_env('IRONIC_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
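+                  # Prefer IRONIC_IMAGE_PATH1604 when set, falling back to IMAGE_PATH1604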
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
index 492cff4..32588b5 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
index 4927c4f..b3a2679 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
@@ -1,22 +1,6 @@
 {% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
index b066287..dbe05d6 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
@@ -213,7 +213,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
index ab7da5d..fbf3a06 100644
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
index ca5c171..b0568d3 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -283,7 +283,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml b/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
index b738b3a..cb176c1 100644
--- a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
+++ b/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
index bbbd2f5..0c30920 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
@@ -82,7 +82,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
index 0dfe7a4..e9a318f 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
@@ -123,7 +123,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 60da5d8..aff5f14 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -186,15 +186,15 @@
     # 10.16.0 -> <external network>
     # So let's replace constant networks to the keywords, and then keywords to the desired networks.
     export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10\./==IPV4_NET_ADMIN_PREFIX==/g' {} +
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10\./==IPV4_NET_CONTROL_PREFIX==/g' {} +
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0\./==IPV4_NET_TENANT_PREFIX==/g' {} +
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0\./==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10/==IPV4_NET_ADMIN_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0/==IPV4_NET_TENANT_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
 
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}./g' {} +
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}./g' {} +
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}./g' {} +
-    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
 
     find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
 
@@ -289,12 +289,12 @@
     sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
     sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
     sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/172\.17\.16\./==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
 
     sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
     sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
     sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
-    sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
 
     {% set items = CLUSTER_PRODUCT_MODELS or '$(ls /tmp/cookiecutter-templates/cluster_product/)' %}
     for item in {{ items }}; do
@@ -649,6 +649,15 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Validate pillar on salt master node
+  cmd: |
+    set -e
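+    # Call the reclass validation helpers only if the installed reclass module provides them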
+    if salt-call sys.doc reclass.validate_nodes_params | grep -q reclass.validate_nodes_params ; then salt-call reclass.validate_nodes_params ; fi
+    if salt-call sys.doc reclass.validate_pillar | grep -q reclass.validate_pillar ; then salt-call reclass.validate_pillar ; fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 - description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@salt:master' state.sls reclass
@@ -720,11 +729,23 @@
 
 - description: Configure salt.minion on other nodes
   cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion &&
-    sleep 10
+    sleep 30
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 15}
   skip_fail: false
 
+- description: Wait for salt-minions to wake up after restart
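+  # The salt.minion state restarts the minions; retry test.ping until every minion responds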
+  cmd: salt --timeout=30 --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 25, delay: 30}
+  skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply the dynamically registered hosts on the cluster nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Check salt minion versions on slaves
   cmd: salt --timeout=60 '*' test.version
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
index 38ff7ac..fd46292 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
@@ -83,7 +83,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
index 47120d5..c32f875 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
@@ -236,7 +236,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 2c60b9a..80e88d1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -47,7 +47,7 @@
   openstack_benchmark_node01_address: 172.16.10.95
   openstack_benchmark_node01_hostname: bmk01
   openstack_cluster_size: compact
-  openstack_compute_count: '100'
+  openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -111,6 +111,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
   salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
   salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
index 0127547..ca8114b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
@@ -63,6 +63,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_pool_manager_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
index f8bf7b9..23420b1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
index 5795a25..eff861b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
@@ -1,22 +1,6 @@
 {% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
index 390bfdb..03777ac 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
@@ -184,7 +184,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -222,7 +222,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -263,7 +263,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -292,7 +292,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -321,7 +321,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -347,7 +347,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -373,7 +373,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
index 8f9e14f..5419a9b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
index b2dfc14..98cf85a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
@@ -184,7 +184,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -222,7 +222,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -263,7 +263,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -292,7 +292,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index 5aaaaf1..37aeeef 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -47,7 +47,7 @@
   openstack_benchmark_node01_address: 172.16.10.95
   openstack_benchmark_node01_hostname: bmk01
   openstack_cluster_size: compact
-  openstack_compute_count: '100'
+  openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
   openstack_compute_rack01_single_subnet: 172.16.10
   openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -111,6 +111,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
   salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
   salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
index 63cedf1..3e05cf0 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
@@ -65,6 +65,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_bind9_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
index f654d14..ec1692a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
index c11350b..f581357 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
@@ -3,22 +3,6 @@
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -67,6 +51,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
index 44214e9..7e2a8a4 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
@@ -174,7 +174,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -212,7 +212,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -253,7 +253,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -282,7 +282,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -311,7 +311,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -337,7 +337,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -363,7 +363,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
index 72f2c9d..4017f70 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
index cf558d1..f5592b2 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
@@ -196,7 +196,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -234,7 +234,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -275,7 +275,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -304,7 +304,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -654,8 +654,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index 9fb83bf..185226b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -111,6 +111,35 @@
   public_host: ${_param:openstack_proxy_address}
   publication_method: email
   reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
   salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
   salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
   salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
index dcb1e40..278c1e0 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -63,6 +63,7 @@
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
+      - features_designate_pool_manager_proxy
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
index 39e9398..d0143da 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
index f492e73..fbc6a00 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
@@ -1,22 +1,6 @@
 {% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
index 32c0e91..d8a81d3 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
@@ -171,7 +171,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -209,7 +209,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -250,7 +250,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -279,7 +279,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -308,7 +308,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -334,7 +334,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -360,7 +360,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -492,8 +492,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
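
All of the memory and vcpu bumps in this underlay (and in the templates below) only change defaults: the '!os_env VAR, default' tag used by these fuel-devops templates resolves to $VAR when that variable is exported and to the literal default otherwise. Note that a single variable covers every node role, so exporting SLAVE_NODE_MEMORY overrides the master, controllers, mon and gateway nodes alike; the per-role differences exist only in the defaults. Sketch of the resolution, under that assumption:

    params:
      vcpu: !os_env SLAVE_NODE_CPU, 2           # $SLAVE_NODE_CPU if exported, else 2
      memory: !os_env SLAVE_NODE_MEMORY, 16384  # $SLAVE_NODE_MEMORY if exported, else 16384 (MiB)
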
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
index 965d297..3981011 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
index b4aff6a..eebc07f 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
@@ -20,9 +20,6 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
@@ -45,9 +42,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_DNS01 }}: +111
             default_{{ HOSTNAME_DNS02 }}: +112
@@ -67,9 +61,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_DNS01 }}: +111
             default_{{ HOSTNAME_DNS02 }}: +112
@@ -89,9 +80,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_DNS01 }}: +111
             default_{{ HOSTNAME_DNS02 }}: +112
@@ -111,9 +99,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_DNS01 }}: +111
             default_{{ HOSTNAME_DNS02 }}: +112
@@ -181,7 +166,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -219,7 +204,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -260,7 +245,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -289,7 +274,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -314,84 +299,6 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
           - name: {{ HOSTNAME_PRX01 }}
             role: salt_minion
             params:
@@ -502,8 +409,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
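
With StackLight disabled in virtual-mcp-pike-dvr, the mon01..mon03 nodes are removed end to end: the Jinja hostname variables, their entries in each of the four address pools, and the three libvirt node definitions. All three places have to agree; a leftover 'default_{{ HOSTNAME_MON01 }}' pool entry would fail rendering once the variable is gone, and a leftover node definition would keep booting an unused 2 GB VM. A quick local check that nothing dangles (illustrative only):

    # grep -n 'HOSTNAME_MON' tcp_tests/templates/virtual-mcp-pike-dvr/*.yaml
    # should print nothing once hostnames, pools and node definitions agree.
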
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
index a33ed13..b825cc2 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
index 3cfaed6..77e33a2 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
@@ -181,7 +181,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -219,7 +219,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -260,7 +260,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -289,7 +289,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -556,8 +556,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
index c762467..0187c6b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
index f87f2a5..e1c3ead 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
@@ -20,9 +20,6 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
@@ -43,9 +40,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -63,9 +57,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -83,9 +74,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -103,9 +91,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -171,7 +156,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -209,7 +194,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -250,7 +235,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -279,7 +264,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -304,84 +289,6 @@
               interfaces: *interfaces
               network_config: *network_config
 
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 3
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *interfaces
-              network_config: *network_config
-
           - name: {{ HOSTNAME_PRX01 }}
             role: salt_minion
             params:
@@ -491,8 +398,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
index 3ad67c2..7049d2a 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
index 76b76b6..37a5bff 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
@@ -3,22 +3,6 @@
 {% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
 
 # Install docker swarm
-- description: Install keepalived on mon nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'mon*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-- description: Check the VIP on mon nodes
-  cmd: |
-    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
-    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Configure docker service
   cmd: salt -C 'I@docker:swarm' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -67,6 +51,22 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 # Install slv2 infra
 - description: Install telegraf
   cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
index 5fa88e9..7b19b50 100644
--- a/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
@@ -75,7 +75,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
index b59a90d..920286d 100644
--- a/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
@@ -62,7 +62,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
index 7b85b51..c42213f 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
@@ -62,7 +62,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
index 82664ec..6653d1f 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
@@ -82,7 +82,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
index 58739fb..1362b8e 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
@@ -80,7 +80,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
index 2355c0c..1e3df5b 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
@@ -112,7 +112,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
index 9ed734a..d21f166 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
@@ -67,7 +67,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
index 60094c7..d76d5d4 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
@@ -164,7 +164,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -366,7 +366,7 @@
             role: salt_minion
             params:
               vcpu: {{ os_env('MON_NODE_CPU', 1) }}
-              memory: {{ os_env('MON_NODE_MEMORY', 3072) }}
+              memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -392,7 +392,7 @@
             role: salt_minion
             params:
               vcpu: {{ os_env('MON_NODE_CPU', 1) }}
-              memory: {{ os_env('MON_NODE_MEMORY', 3072) }}
+              memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -418,7 +418,7 @@
             role: salt_minion
             params:
               vcpu: {{ os_env('MON_NODE_CPU', 1) }}
-              memory: {{ os_env('MON_NODE_MEMORY', 3072) }}
+              memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
               boot:
                 - hd
               cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
index db00e7b..2356475 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
index 129d360..7d13f72 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
index 85faa31..7daf069 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
@@ -83,7 +83,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
index 34283a1..2527625 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
@@ -83,7 +83,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 44024a9..1ade409 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -114,6 +114,16 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: Install bind if the 'bind:server' pillar exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@bind:server' match.pillar 'bind:server' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False  -C 'I@bind:server' state.sls bind;
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@designate:server' state.sls designate -b 1
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Check neutron agent-list
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
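
This is the DNSaaS hook for the offline template: when the compound target I@bind:server matches no minion (no DNSaaS in the model), the salt call fails and the if-branch is skipped, so the step stays a harmless no-op; when it matches, bind is installed first and designate is applied with '-b 1' so only one designate server runs its state, and in particular its database sync, at a time, mirroring the galera batching above. A standalone probe for the guard condition (a sketch):

    - description: Probe whether any node carries the bind:server pillar
      cmd: salt -C 'I@bind:server' match.pillar 'bind:server'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true
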
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
index ccb9a12..0823cb4 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
@@ -152,6 +152,9 @@
          - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
            source_image: !os_env MCP_IMAGE_PATH1604
            format: qcow2
+         - name: apt_cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env APT_IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
 
         nodes:
           - name: {{ HOSTNAME_APT01 }}
@@ -166,7 +169,7 @@
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
+                  backing_store: apt_cloudimage1604
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
@@ -208,7 +211,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
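
The apt01 mirror VM now boots from its own image instead of sharing cloudimage1604 with the rest of the lab: apt_cloudimage1604 points at a stock Xenial cloud image (the URL in the comment above), while the control-plane nodes keep the pre-configured mcp_ubuntu_1604_image. The path is taken from the environment with no default, so the variable must be exported before deployment; presumably rendering fails, or yields an empty source_image, when it is unset. For example (illustrative path):

    # export APT_IMAGE_PATH1604=/images/xenial-server-cloudimg-amd64-disk1.img
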
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..1055402
--- /dev/null
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -0,0 +1,156 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: virtual-pike-ovs-dpdk.local
+  cluster_name: virtual-pike-ovs-dpdk
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: ${_param:openstack_control_node01_address}
+#  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: ${_param:openstack_control_node01_hostname}
+  infra_kvm02_control_address: ${_param:openstack_control_node02_address}
+#  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: ${_param:openstack_control_node02_hostname}
+  infra_kvm03_control_address: ${_param:openstack_control_node03_address}
+#  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: ${_param:openstack_control_node03_hostname}
+  infra_kvm_vip_address: ${_param:openstack_control_address}
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.4.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.4.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.4.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.4.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 10.167.4.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.4.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.4.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.4.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'True'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_hugepages_count: '600'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_cpu_pinning: '3'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  backup_private_key: |
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+      k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+      Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+      6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+      lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+      MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+      yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+      dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+      FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+      5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+      g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+      AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+      CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+      H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+      gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+      MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+      lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+      ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+      SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+      HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+      0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+      M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+      erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+      aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+      7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+      -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_branch: master
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
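
This new context drives the cookiecutter render of the virtual-pike-ovs-dpdk cluster model. The notable layout decision is splitting the database (dbs01..03) and message queue (msg01..03) roles off the controllers onto dedicated nodes; the backup_private_key embedded above is a throwaway test key, as is usual in these lab contexts. One thing worth double-checking: the dbs/msg addresses sit on 10.167.4.0/24 while the declared control subnet is 172.16.10.0/24, e.g.:

    control_network_subnet: 172.16.10.0/24
    openstack_database_address: 10.167.4.50       # outside control_network_subnet
    openstack_message_queue_address: 10.167.4.40  # outside control_network_subnet

If that is unintended, the rendered model may need these aligned with the 172.16.10.0/24 range used by the ctl/prx/gtw nodes.
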
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
index 63cedf1..a5640d1 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
@@ -1,5 +1,5 @@
 nodes:
-    cfg01.mcp11-ovs-dpdk.local:
+    cfg01.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: infra_config_node01
       roles:
       - infra_config
@@ -10,17 +10,11 @@
         ens4:
           role: single_ctl
 
-    ctl01.mcp11-ovs-dpdk.local:
+    ctl01.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: openstack_control_node01
       roles:
       - infra_kvm
       - openstack_control_leader
-      - openstack_database_leader
-      - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
-      - features_designate_bind9_keystone
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -28,16 +22,11 @@
         ens4:
           role: single_ctl
 
-    ctl02.mcp11-ovs-dpdk.local:
+    ctl02.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: openstack_control_node02
       roles:
       - infra_kvm
       - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9_dns
-      - features_designate_bind9
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,15 +34,11 @@
         ens4:
           role: single_ctl
 
-    ctl03.mcp11-ovs-dpdk.local:
+    ctl03.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: openstack_control_node03
       roles:
       - infra_kvm
       - openstack_control
-      - openstack_database
-      - openstack_message_queue
-      - features_designate_bind9_database
-      - features_designate_bind9
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -61,7 +46,73 @@
         ens4:
           role: single_ctl
 
-    prx01.mcp11-ovs-dpdk.local:
+    dbs01.virtual-pike-ovs-dpdk.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs02.virtual-pike-ovs-dpdk.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    dbs03.virtual-pike-ovs-dpdk.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg01.virtual-pike-ovs-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg02.virtual-pike-ovs-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    msg03.virtual-pike-ovs-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: openstack_proxy_node01
       roles:
       - openstack_proxy
@@ -72,47 +123,8 @@
         ens4:
           role: single_ctl
 
-    mon01.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node01
-      roles:
-      - stacklightv2_server_leader
-      - stacklight_telemetry_leader
-      - stacklight_log_leader_v2
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon02.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node02
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    mon03.mcp11-ovs-dpdk.local:
-      reclass_storage_name: stacklight_server_node03
-      roles:
-      - stacklightv2_server
-      - stacklight_telemetry
-      - stacklight_log
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
     # Generator-based computes. For compatibility only
-    cmp<<count>>.mcp11-ovs-dpdk.local:
+    cmp<<count>>.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
@@ -127,7 +139,7 @@
         ens6:
           role: bond1_ab_ovs_floating
 
-    gtw01.mcp11-ovs-dpdk.local:
+    gtw01.virtual-pike-ovs-dpdk.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
       - openstack_gateway
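
The environment file mirrors the cookiecutter split: the openstack_database* and openstack_message_queue* roles leave ctl01..03 and land on the new dbs01..03 and msg01..03 nodes, the StackLight mon nodes are dropped, and every hostname moves to the virtual-pike-ovs-dpdk.local domain. All of the new nodes follow the same minimal entry shape (a pattern sketch with placeholder names):

    <hostname>.virtual-pike-ovs-dpdk.local:
      reclass_storage_name: <reclass storage key>
      roles:
      - <primary role>
      - linux_system_codename_xenial
      interfaces:
        ens3:
          role: single_dhcp
        ens4:
          role: single_ctl
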
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..050c4c4
--- /dev/null
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
@@ -0,0 +1,37 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Start compute node addresses from .105, as in the static models
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
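
The workaround block prunes what the shared cookiecutter model would otherwise generate for combined roles: the sed lines drop the physical-control and StackLight storage classes from infra/config.yml, the numbering change starts generator-based computes at .105 to match the static underlay addresses, and 'reclass-tools del-key' removes one dotted key path from a YAML file in place, here the infra_kvm_node01..03 and stacklight_log_node01 storage nodes. The tool's calling convention, as used above (illustration):

    # reclass-tools del-key parameters.reclass.storage.node.<node_key> <path/to/config.yml>
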
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
index 84e4829..9da458c 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
@@ -76,7 +76,7 @@
 
 - description: Install Galera on other servers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
+    -C 'I@galera:slave' state.sls galera -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index 4079089..6c8e9c4 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -1,4 +1,7 @@
 {% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'virtual-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
 {% from 'virtual-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
 
@@ -28,19 +31,19 @@
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 - description: Hack gtw node
-  cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+  cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Hack cmp01 node
-  cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+  cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Hack cmp02 node
-  cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+  cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
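
Replacing the glob targets ('gtw*', 'cmp01*', 'cmp02*') with the full hostnames imported from underlay.yaml makes the hack steps deterministic: a glob can match same-named minions from another lab on a shared Salt master, while an imported FQDN matches exactly one node. The difference, sketched with a harmless test call:

    # salt 'gtw*' test.ping                                # any minion whose id starts with gtw
    # salt 'gtw01.virtual-pike-ovs-dpdk.local' test.ping   # exactly this node
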
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
index f0463b1..d82c380 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
@@ -18,6 +18,14 @@
 {% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -38,6 +46,14 @@
             default_{{ HOSTNAME_CTL01 }}: +101
             default_{{ HOSTNAME_CTL02 }}: +102
             default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
             default_{{ HOSTNAME_GTW01 }}: +110
@@ -55,6 +71,14 @@
             default_{{ HOSTNAME_CTL01 }}: +101
             default_{{ HOSTNAME_CTL02 }}: +102
             default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
             default_{{ HOSTNAME_GTW01 }}: +110
@@ -72,6 +96,14 @@
             default_{{ HOSTNAME_CTL01 }}: +101
             default_{{ HOSTNAME_CTL02 }}: +102
             default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
             default_{{ HOSTNAME_GTW01 }}: +110
@@ -89,6 +121,14 @@
             default_{{ HOSTNAME_CTL01 }}: +101
             default_{{ HOSTNAME_CTL02 }}: +102
             default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
             default_{{ HOSTNAME_GTW01 }}: +110
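
Each new `default_<fqdn>: +N` entry appears to pin that host to the pool's base address plus N, and the same offsets are repeated in every address pool so a node keeps the same final octet on all networks (dbs at +50..+53, msg at +40..+43, clear of the existing +100..+110 range). A rough illustration of the arithmetic, assuming a hypothetical /24 pool:

    # Sketch: resolving '+N' offsets against a pool base (made-up subnet).
    import ipaddress

    pool = ipaddress.ip_network('10.70.0.0/24')
    offsets = {'dbs01': 51, 'dbs02': 52, 'dbs03': 53,
               'msg01': 41, 'msg02': 42, 'msg03': 43}
    for host, n in sorted(offsets.items()):
        print(host, pool.network_address + n)  # e.g. dbs01 10.70.0.51
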
@@ -155,7 +195,7 @@
             role: salt_master
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
               cloud_init_volume_name: iso
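
The raised memory defaults here and in the hunks below still go through the `!os_env VAR, default` tag, so a CI job that exports SLAVE_NODE_MEMORY keeps overriding them. A rough Python equivalent of the assumed tag semantics (environment variable wins, literal default otherwise):

    # Sketch of the assumed '!os_env SLAVE_NODE_MEMORY, 8192' behaviour.
    import os

    def os_env(name, default):
        return os.environ.get(name, default)

    memory = int(os_env('SLAVE_NODE_MEMORY', 8192))
    print(memory)  # 8192 unless SLAVE_NODE_MEMORY is exported
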
@@ -193,7 +233,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -234,7 +274,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -263,7 +303,7 @@
             role: salt_minion
             params:
               vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -288,6 +328,162 @@
               interfaces: *interfaces
               network_config: *network_config
 
+          - name: {{ HOSTNAME_DBS01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_DBS03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MSG03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
           - name: {{ HOSTNAME_PRX01 }}
             role: salt_minion
             params:
@@ -408,8 +604,8 @@
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 1
-              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
               boot:
                 - hd
               cloud_init_volume_name: iso
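
The six new dbs/msg node definitions above stay short because they reuse the template's YAML aliases (*interfaces, *network_config, *cloudinit_meta_data, *cloudinit_user_data_1604): each anchor is defined once earlier in the file and every VM shares it. A minimal, self-contained illustration of the anchor/alias mechanism, with made-up data:

    # Sketch: YAML '&anchor' defines a block once, '*alias' reuses it.
    import textwrap
    import yaml

    doc = textwrap.dedent("""
        shared: &interfaces
          - {label: ens3}
          - {label: ens4}
        dbs01: {interfaces: *interfaces}
        msg01: {interfaces: *interfaces}
    """)
    data = yaml.safe_load(doc)
    print(data['msg01']['interfaces'][0]['label'])  # ens3
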
diff --git a/tcp_tests/tests/system/test_failover.py b/tcp_tests/tests/system/test_failover.py
deleted file mode 100644
index 615e26f..0000000
--- a/tcp_tests/tests/system/test_failover.py
+++ /dev/null
@@ -1,603 +0,0 @@
-#    Copyright 2017 Mirantis, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-from devops.helpers import helpers
-import pytest
-
-from tcp_tests import logger
-from tcp_tests.helpers import ext
-
-LOG = logger.logger
-
-
-class TestFailover(object):
-    """Test class for testing OpenStack nodes failover"""
-
-    @staticmethod
-    def check_influxdb_xfail(sl_deployed, node_name, value):
-
-        def check_influxdb_data():
-            return value in sl_deployed.check_data_in_influxdb(node_name)
-
-        try:
-            helpers.wait(
-                check_influxdb_data,
-                timeout=10, interval=2,
-                timeout_msg=('Influxdb data {0} was not replicated to {1} '
-                             '[https://mirantis.jira.com/browse/PROD-16272]'
-                             .format(value, node_name)))
-        except Exception:
-            pytest.xfail('Influxdb data {0} was not replicated to {1} '
-                         '[https://mirantis.jira.com/browse/PROD-16272]'
-                         .format(value, node_name))
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    def test_warm_shutdown_ctl01_node(self, underlay, openstack_deployed,
-                                      openstack_actions, show_step):
-        """Test warm shutdown ctl01
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute nodes
-            4. Shutdown ctl01
-            5. Run tempest smoke after failover
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-        # STEP #4
-        show_step(4)
-        openstack_actions.warm_shutdown_openstack_nodes('ctl01')
-        # STEP #5
-        show_step(5)
-        openstack_actions.run_tempest(pattern='smoke')
-
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    def test_restart_ctl01_node(self, underlay, openstack_deployed,
-                                openstack_actions, show_step):
-        """Test restart ctl01
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute nodes
-            4. Restart ctl01
-            5. Run tempest smoke after failover
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        openstack_actions.warm_restart_nodes('ctl01')
-        # STEP #5
-        show_step(5)
-        openstack_actions.run_tempest(pattern='smoke')
-
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    def test_warm_shutdown_cmp01_node(self, underlay, openstack_deployed,
-                                      openstack_actions, show_step):
-        """Test warm shutdown cmp01
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute nodes
-            4. Shutdown cmp01
-            5. Run tempest smoke after failover
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        openstack_actions.warm_shutdown_openstack_nodes('cmp01')
-        # STEP #5
-        show_step(5)
-        openstack_actions.run_tempest(pattern='smoke')
-
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    def test_restart_cmp01_node(self, underlay, openstack_deployed,
-                                openstack_actions, show_step):
-        """Test restart cmp01
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute nodes
-            4. Restart cmp01
-            5. Run tempest smoke after failover
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        openstack_actions.warm_restart_nodes('cmp01')
-        # STEP #5
-        show_step(5)
-        openstack_actions.run_tempest(pattern='smoke')
-
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_restart_mon01_node(self, openstack_actions,
-                                sl_os_deployed, show_step):
-        """Test restart mon01
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute, monitoring nodes
-            4. Check targets before restart
-            5. Restart mon01
-            6. Check targets after restart
-            7. Run LMA smoke after failover
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        sl_os_deployed.check_prometheus_targets(mon_nodes)
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-        # STEP #5
-        show_step(5)
-        openstack_actions.warm_restart_nodes('mon01')
-        # STEP #6
-        show_step(6)
-        sl_os_deployed.check_prometheus_targets(mon_nodes)
-        # STEP #7
-        show_step(7)
-        # Run SL component tetsts
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_warm_shutdown_mon01_node(self, openstack_actions,
-                                      sl_os_deployed,
-                                      show_step):
-        """Test warm shutdown mon01
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute, monitoring nodes
-            4. Check LMA before mon node shutdown
-            5. Shutdown mon01 node
-            6. Run LMA tests after failover
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        sl_os_deployed.check_prometheus_targets(mon_nodes)
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-        # STEP #5
-        show_step(5)
-        openstack_actions.warm_shutdown_openstack_nodes('mon01')
-        # STEP #6
-        show_step(6)
-        # Run SL component tetsts
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_kill_influxdb_relay_mon01_node(self, sl_os_deployed,
-                                            show_step):
-        """Test kill influxdb relay on mon01 node
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute, monitoring nodes
-            4. Check LMA before mon node shutdown
-            5. Kill influxdb relay on mon01 node
-            6. Post data into influx
-            7. Get data from all healthy nodes
-            8. Start influx db
-            9. Request data on mon01
-            10. Run LMA tests after fail and compare with result before fail
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-
-        # STEP #5
-        show_step(5)
-        sl_os_deployed.kill_sl_service_on_node('mon01', 'influxdb-relay')
-
-        # STEP #6
-        show_step(6)
-        sl_os_deployed.post_data_into_influx('mon02')
-
-        # STEP #7
-        show_step(7)
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
-        # STEP #8
-        show_step(8)
-        sl_os_deployed.start_service('mon01', 'influxdb-relay')
-
-        # STEP #9
-        show_step(9)
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
-
-        # STEP #10
-        show_step(10)
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_kill_influxdb_mon01_node(self, sl_os_deployed, show_step):
-        """Test kill influxdb on mon01 node
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute, monitoring nodes
-            4. Check LMA before mon node shutdown
-            5. Kill influxdb on mon01 node
-            6. Post data into influx
-            7. Get data from all healthy nodes
-            8. Start influx db
-            9. Request data on mon01
-            10. Run LMA tests after fail and compare with result before fail
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-        # STEP #5
-        show_step(5)
-        sl_os_deployed.kill_sl_service_on_node('mon01', 'influxd')
-
-        # STEP #6
-        show_step(6)
-        sl_os_deployed.post_data_into_influx('mon02')
-
-        # STEP #7
-        show_step(7)
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
-        # STEP #8
-        show_step(8)
-        sl_os_deployed.start_service('mon01', 'influxd')
-
-        # STEP #9
-        show_step(9)
-        self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
-
-        # STEP #10
-        show_step(10)
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_stop_influxdb_relay_mon_nodes(self, sl_os_deployed,
-                                           show_step):
-        """Test stop influxdb relay on mon01 node
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute, monitoring nodes
-            4. Check LMA before mon node shutdown
-            5. Stop influxdb relay on mon01 and mon02 nodes
-            6. Post data into influx
-            7. Get data from all healthy nodes
-            8. Start influx db
-            9. Request data on mon01, 02
-            10. Run LMA tests after fail and compare with result before fail
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-        # STEP #5
-        show_step(5)
-        sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb-relay')
-        sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb-relay')
-
-        # STEP #6
-        show_step(6)
-        sl_os_deployed.post_data_into_influx('mon03')
-
-        # STEP #7
-        show_step(7)
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
-        # STEP #8
-        show_step(8)
-        sl_os_deployed.start_service('mon01', 'influxdb-relay')
-        sl_os_deployed.start_service('mon02', 'influxdb-relay')
-
-        # STEP #9
-        show_step(9)
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
-
-        # STEP #10
-        show_step(10)
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_stop_influxdb_mon_nodes(self, sl_os_deployed, show_step):
-        """Test stop influxdb on mon01 node
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute, monitoring nodes
-            4. Check LMA before mon node shutdown
-            5. Stop influxdb on mon01 and mon02 node
-            6. Post data into influx
-            7. Get data from all healthy nodes
-            8. Start influx db
-            9. Request data on mon01
-            10. Run LMA tests after fail and compare with result before fail
-
-
-        """
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-
-        # STEP #5
-        show_step(5)
-        sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb')
-        sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb')
-
-        # STEP #6
-        show_step(6)
-        sl_os_deployed.post_data_into_influx('mon03')
-
-        # STEP #7
-        show_step(7)
-        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
-        # STEP #8
-        show_step(8)
-        sl_os_deployed.start_service('mon01', 'influxdb')
-        sl_os_deployed.start_service('mon02', 'influxdb')
-
-        # STEP #9
-        show_step(9)
-        self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
-        self.check_influxdb_xfail(sl_os_deployed, 'mon02', 'mymeas')
-
-        # STEP #10
-        show_step(10)
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
-    def test_restart_mon_with_vip(self, sl_os_deployed,
-                                  openstack_actions, salt_actions,
-                                  common_services_actions, show_step):
-        """Test restart mon with VIP
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute and monitoring nodes
-            4. Check LMA before mon node restart
-            5. Find mon minion id with VIP
-            6. Restart mon minion id with VIP
-            7. Check that VIP was actually migrated on a new node
-            8. Run tempest smoke after failover
-
-
-        """
-        # TR case #4753939
-        common_services_actions.check_keepalived_pillar()
-        salt = salt_actions
-
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        mon_nodes = sl_os_deployed.get_monitoring_nodes()
-        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
-        before_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        failed_tests = [test['name'] for test in
-                        before_result if 'passed' not in test['outcome']]
-
-        # STEP #5
-        show_step(5)
-        mon_vip_pillar = salt.get_pillar(
-            tgt="mon0*",
-            pillar="_param:cluster_vip_address")[0]
-        vip = [vip for minion_id, vip in mon_vip_pillar.items()][0]
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
-        LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
-
-        # STEP #6
-        show_step(6)
-        openstack_actions.warm_restart_nodes(minion_vip)
-
-        # STEP #7
-        show_step(7)
-        # Check that VIP has been actually migrated to a new node
-        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
-            vip)
-        LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
-        assert new_minion_vip != minion_vip, (
-            "VIP {0} wasn't migrated from {1} after node reboot!"
-            .format(vip, new_minion_vip))
-        common_services_actions.check_keepalived_pillar()
-
-        # STEP #8
-        show_step(8)
-        # Run SL component tetsts
-        after_result = sl_os_deployed.run_sl_tests_json(
-            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
-            'tests/prometheus/', 'test_alerts.py')
-        for test in after_result:
-            if test['name'] not in failed_tests:
-                assert 'passed' in test['outcome'], \
-                    'Failed test {}'.format(test)
-        LOG.info("*************** DONE **************")
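
The check_influxdb_xfail helper removed above reappears verbatim in the new test_failover_stacklight_services.py further down. Its idiom is worth spelling out: poll for the replicated value with devops' helpers.wait, and convert a timeout into pytest.xfail tied to the tracked bug (PROD-16272) instead of a hard failure. Reduced to a standalone sketch with a dummy predicate:

    # Sketch of the wait-then-xfail idiom (dummy predicate, no real InfluxDB).
    import pytest
    from devops.helpers import helpers

    def check_replicated():
        return False  # stand-in for: value in check_data_in_influxdb(node)

    try:
        helpers.wait(check_replicated, timeout=10, interval=2,
                     timeout_msg='data was not replicated')
    except Exception:
        pytest.xfail('data was not replicated [known issue]')
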
diff --git a/tcp_tests/tests/system/test_failover_nodes.py b/tcp_tests/tests/system/test_failover_nodes.py
new file mode 100644
index 0000000..0abada9
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover_nodes.py
@@ -0,0 +1,370 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+class TestFailoverNodes(object):
+    """Test class for testing OpenStack nodes failover"""
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_warm_shutdown_ctl01_node(self, underlay, openstack_deployed,
+                                      openstack_actions, show_step):
+        """Test warm shutdown ctl01
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Shutdown ctl01
+            5. Run tempest smoke after failover
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+        # STEP #4
+        show_step(4)
+        openstack_actions.warm_shutdown_openstack_nodes('ctl01')
+        # STEP #5
+        show_step(5)
+        openstack_actions.run_tempest(pattern='smoke')
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_restart_ctl01_node(self, underlay, openstack_deployed,
+                                openstack_actions, show_step):
+        """Test restart ctl01
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Restart ctl01
+            5. Run tempest smoke after failover
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        openstack_actions.warm_restart_nodes('ctl01')
+        # STEP #5
+        show_step(5)
+        openstack_actions.run_tempest(pattern='smoke')
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_warm_shutdown_cmp01_node(self, underlay, openstack_deployed,
+                                      openstack_actions, show_step):
+        """Test warm shutdown cmp01
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Shutdown cmp01
+            5. Run tempest smoke after failover
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        openstack_actions.warm_shutdown_openstack_nodes('cmp01')
+        # STEP #5
+        show_step(5)
+        openstack_actions.run_tempest(pattern='smoke')
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_restart_cmp01_node(self, underlay, openstack_deployed,
+                                openstack_actions, show_step):
+        """Test restart cmp01
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Restart cmp01
+            5. Run tempest smoke after failover
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        openstack_actions.warm_restart_nodes('cmp01')
+        # STEP #5
+        show_step(5)
+        openstack_actions.run_tempest(pattern='smoke')
+
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_restart_mon01_node(self, openstack_actions,
+                                sl_os_deployed, show_step):
+        """Test restart mon01
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute, monitoring nodes
+            4. Check targets before restart
+            5. Restart mon01
+            6. Check targets after restart
+            7. Run LMA smoke after failover
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        sl_os_deployed.check_prometheus_targets(mon_nodes)
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+        # STEP #5
+        show_step(5)
+        openstack_actions.warm_restart_nodes('mon01')
+        # STEP #6
+        show_step(6)
+        sl_os_deployed.check_prometheus_targets(mon_nodes)
+        # STEP #7
+        show_step(7)
+        # Run SL component tests
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_warm_shutdown_mon01_node(self, openstack_actions,
+                                      sl_os_deployed,
+                                      show_step):
+        """Test warm shutdown mon01
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute, monitoring nodes
+            4. Check LMA before mon node shutdown
+            5. Shutdown mon01 node
+            6. Run LMA tests after failover
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        sl_os_deployed.check_prometheus_targets(mon_nodes)
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+        # STEP #5
+        show_step(5)
+        openstack_actions.warm_shutdown_openstack_nodes('mon01')
+        # STEP #6
+        show_step(6)
+        # Run SL component tests
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_restart_mon_with_vip(self, sl_os_deployed,
+                                  openstack_actions, salt_actions,
+                                  common_services_actions, show_step):
+        """Test restart mon with VIP
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute and monitoring nodes
+            4. Check LMA before mon node restart
+            5. Find mon minion id with VIP
+            6. Restart mon minion id with VIP
+            7. Check that VIP was actually migrated to a new node
+            8. Run tempest smoke after failover
+
+
+        """
+        # TR case #4753939
+        common_services_actions.check_keepalived_pillar()
+        salt = salt_actions
+
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+
+        # STEP #5
+        show_step(5)
+        mon_vip_pillar = salt.get_pillar(
+            tgt="mon0*",
+            pillar="_param:cluster_vip_address")[0]
+        vip = [vip for minion_id, vip in mon_vip_pillar.items()][0]
+        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
+
+        # STEP #6
+        show_step(6)
+        openstack_actions.warm_restart_nodes(minion_vip)
+
+        # STEP #7
+        show_step(7)
+        # Check that VIP has been actually migrated to a new node
+        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+            vip)
+        LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
+        assert new_minion_vip != minion_vip, (
+            "VIP {0} wasn't migrated from {1} after node reboot!"
+            .format(vip, new_minion_vip))
+        common_services_actions.check_keepalived_pillar()
+
+        # STEP #8
+        show_step(8)
+        # Run SL component tests
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
+    def test_restart_ctl_with_vip(self, underlay, openstack_deployed,
+                                  openstack_actions, salt_actions,
+                                  common_services_actions, show_step):
+        """Test restart clt with VIP
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute nodes
+            4. Find controller minion id with VIP
+            5. Restart controller minion id with VIP
+            7. Check that VIP was actually migrated to a new node
+            7. Run tempest smoke after failover
+
+
+        """
+        # TR case #3385671
+        common_services_actions.check_keepalived_pillar()
+        salt = salt_actions
+
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        ctl_vip_pillar = salt.get_pillar(
+            tgt="I@nova:controller:enabled:True",
+            pillar="_param:cluster_vip_address")[0]
+        vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
+        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+        LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
+
+        # STEP #5
+        show_step(5)
+        openstack_actions.warm_restart_nodes(minion_vip)
+
+        # STEP #6
+        show_step(6)
+        # Check that VIP has been actually migrated to a new node
+        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+            vip)
+        LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
+        assert new_minion_vip != minion_vip, (
+            "VIP {0} wasn't migrated from {1} after node reboot!"
+            .format(vip, new_minion_vip))
+        common_services_actions.check_keepalived_pillar()
+
+        # STEP #7
+        show_step(7)
+        openstack_actions.run_tempest(pattern='smoke')
+
+        LOG.info("*************** DONE **************")
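
Both VIP tests in this new file follow one recipe: read _param:cluster_vip_address from the pillar of the candidate minions, ask keepalived which minion currently holds the VIP, warm-restart that minion, and assert the VIP moved. Since the pillar lookup returns the same VIP for every matched minion, the tests simply take the first value; a reduced sketch of that selection step, with fabricated pillar data:

    # Sketch: picking the cluster VIP out of a per-minion pillar result.
    # The dict below is fabricated; in the tests it comes from
    # salt_actions.get_pillar(tgt=..., pillar='_param:cluster_vip_address').
    mon_vip_pillar = {
        'mon01.domain.local': '10.167.4.240',
        'mon02.domain.local': '10.167.4.240',
        'mon03.domain.local': '10.167.4.240',
    }
    vip = next(iter(mon_vip_pillar.values()))  # identical on every minion
    assert all(v == vip for v in mon_vip_pillar.values())
    print(vip)
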
diff --git a/tcp_tests/tests/system/test_failover_openstack_services.py b/tcp_tests/tests/system/test_failover_openstack_services.py
index efbba58..37cff72 100644
--- a/tcp_tests/tests/system/test_failover_openstack_services.py
+++ b/tcp_tests/tests/system/test_failover_openstack_services.py
@@ -600,60 +600,3 @@
         assert not results['fail'], self.show_failed_msg(results['fail'])
 
         LOG.info("*************** DONE **************")
-
-    @pytest.mark.grab_versions
-    @pytest.mark.fail_snapshot
-    def test_restart_ctl_with_vip(self, underlay, openstack_deployed,
-                                  openstack_actions, salt_actions,
-                                  common_services_actions, show_step):
-        """Test restart clt with VIP
-
-        Scenario:
-            1. Prepare salt on hosts
-            2. Setup controller nodes
-            3. Setup compute nodes
-            4. Find controller minion id with VIP
-            5. Restart controller minion id with VIP
-            6. Check that VIP was actually migrated on a new node
-            7. Run tempest smoke after failover
-
-
-        """
-        # TR case #3385671
-        common_services_actions.check_keepalived_pillar()
-        salt = salt_actions
-
-        # STEP #1,2,3
-        show_step(1)
-        show_step(2)
-        show_step(3)
-
-        # STEP #4
-        show_step(4)
-        ctl_vip_pillar = salt.get_pillar(
-            tgt="I@nova:controller:enabled:True",
-            pillar="_param:cluster_vip_address")[0]
-        vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
-        minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
-        LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
-
-        # STEP #5
-        show_step(5)
-        openstack_actions.warm_restart_nodes(minion_vip)
-
-        # STEP #6
-        show_step(6)
-        # Check that VIP has been actually migrated to a new node
-        new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
-            vip)
-        LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
-        assert new_minion_vip != minion_vip, (
-            "VIP {0} wasn't migrated from {1} after node reboot!"
-            .format(vip, new_minion_vip))
-        common_services_actions.check_keepalived_pillar()
-
-        # STEP #7
-        show_step(7)
-        openstack_actions.run_tempest(pattern='smoke')
-
-        LOG.info("*************** DONE **************")
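
Every StackLight failover test in the new file below applies the same regression guard: record which alert tests already fail before injecting the fault, then after recovery assert only that no new test fails. The filtering, reduced to a standalone sketch with fabricated results:

    # Sketch of the before/after filtering used in these tests (fake data).
    before_result = [{'name': 'test_cpu_alert', 'outcome': 'passed'},
                     {'name': 'test_disk_alert', 'outcome': 'failed'}]
    after_result = [{'name': 'test_cpu_alert', 'outcome': 'passed'},
                    {'name': 'test_disk_alert', 'outcome': 'failed'}]

    failed_tests = [t['name'] for t in before_result
                    if 'passed' not in t['outcome']]
    for test in after_result:
        if test['name'] not in failed_tests:
            assert 'passed' in test['outcome'], 'Failed test {}'.format(test)
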
diff --git a/tcp_tests/tests/system/test_failover_stacklight_services.py b/tcp_tests/tests/system/test_failover_stacklight_services.py
new file mode 100644
index 0000000..3bb47eb
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover_stacklight_services.py
@@ -0,0 +1,314 @@
+#    Copyright 2017 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from devops.helpers import helpers
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+class TestFailoverStacklightServices(object):
+    """Test class for testing OpenStack nodes failover"""
+
+    @staticmethod
+    def check_influxdb_xfail(sl_deployed, node_name, value):
+
+        def check_influxdb_data():
+            return value in sl_deployed.check_data_in_influxdb(node_name)
+
+        try:
+            helpers.wait(
+                check_influxdb_data,
+                timeout=10, interval=2,
+                timeout_msg=('Influxdb data {0} was not replicated to {1} '
+                             '[https://mirantis.jira.com/browse/PROD-16272]'
+                             .format(value, node_name)))
+        except Exception:
+            pytest.xfail('Influxdb data {0} was not replicated to {1} '
+                         '[https://mirantis.jira.com/browse/PROD-16272]'
+                         .format(value, node_name))
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_kill_influxdb_relay_mon01_node(self, sl_os_deployed,
+                                            show_step):
+        """Test kill influxdb relay on mon01 node
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute, monitoring nodes
+            4. Check LMA before the fault
+            5. Kill influxdb relay on mon01 node
+            6. Post data into InfluxDB
+            7. Get data from all healthy nodes
+            8. Start influxdb-relay on mon01
+            9. Request data on mon01
+            10. Run LMA tests after the failure and compare with results from before
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+
+        # STEP #5
+        show_step(5)
+        sl_os_deployed.kill_sl_service_on_node('mon01', 'influxdb-relay')
+
+        # STEP #6
+        show_step(6)
+        sl_os_deployed.post_data_into_influx('mon02')
+
+        # STEP #7
+        show_step(7)
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+        # STEP #8
+        show_step(8)
+        sl_os_deployed.start_service('mon01', 'influxdb-relay')
+
+        # STEP #9
+        show_step(9)
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
+
+        # STEP #10
+        show_step(10)
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_kill_influxdb_mon01_node(self, sl_os_deployed, show_step):
+        """Test kill influxdb on mon01 node
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute, monitoring nodes
+            4. Check LMA before the fault
+            5. Kill influxdb on mon01 node
+            6. Post data into InfluxDB
+            7. Get data from all healthy nodes
+            8. Start influxdb on mon01
+            9. Request data on mon01
+            10. Run LMA tests after the failure and compare with results from before
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+        # STEP #5
+        show_step(5)
+        sl_os_deployed.kill_sl_service_on_node('mon01', 'influxd')
+
+        # STEP #6
+        show_step(6)
+        sl_os_deployed.post_data_into_influx('mon02')
+
+        # STEP #7
+        show_step(7)
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+        # STEP #8
+        show_step(8)
+        sl_os_deployed.start_service('mon01', 'influxd')
+
+        # STEP #9
+        show_step(9)
+        self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
+
+        # STEP #10
+        show_step(10)
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_stop_influxdb_relay_mon_nodes(self, sl_os_deployed,
+                                           show_step):
+        """Test stop influxdb relay on mon01 node
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute, monitoring nodes
+            4. Check LMA before the fault
+            5. Stop influxdb relay on mon01 and mon02 nodes
+            6. Post data into InfluxDB
+            7. Get data from all healthy nodes
+            8. Start influxdb-relay on mon01 and mon02
+            9. Request data on mon01 and mon02
+            10. Run LMA tests after the failure and compare with results from before
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+        # STEP #5
+        show_step(5)
+        sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb-relay')
+        sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb-relay')
+
+        # STEP #6
+        show_step(6)
+        sl_os_deployed.post_data_into_influx('mon03')
+
+        # STEP #7
+        show_step(7)
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+        # STEP #8
+        show_step(8)
+        sl_os_deployed.start_service('mon01', 'influxdb-relay')
+        sl_os_deployed.start_service('mon02', 'influxdb-relay')
+
+        # STEP #9
+        show_step(9)
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
+
+        # STEP #10
+        show_step(10)
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+    def test_stop_influxdb_mon_nodes(self, sl_os_deployed, show_step):
+        """Test stop influxdb on mon01 node
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Setup controller nodes
+            3. Setup compute, monitoring nodes
+            4. Check LMA before the fault
+            5. Stop influxdb on mon01 and mon02 nodes
+            6. Post data into InfluxDB
+            7. Get data from all healthy nodes
+            8. Start influxdb on mon01 and mon02
+            9. Request data on mon01 and mon02
+            10. Run LMA tests after the failure and compare with results from before
+
+
+        """
+        # STEP #1,2,3
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        # STEP #4
+        show_step(4)
+        mon_nodes = sl_os_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+        before_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        failed_tests = [test['name'] for test in
+                        before_result if 'passed' not in test['outcome']]
+
+        # STEP #5
+        show_step(5)
+        sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb')
+        sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb')
+
+        # STEP #6
+        show_step(6)
+        sl_os_deployed.post_data_into_influx('mon03')
+
+        # STEP #7
+        show_step(7)
+        assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+        # STEP #8
+        show_step(8)
+        sl_os_deployed.start_service('mon01', 'influxdb')
+        sl_os_deployed.start_service('mon02', 'influxdb')
+
+        # STEP #9
+        show_step(9)
+        self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
+        self.check_influxdb_xfail(sl_os_deployed, 'mon02', 'mymeas')
+
+        # STEP #10
+        show_step(10)
+        after_result = sl_os_deployed.run_sl_tests_json(
+            'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+            'tests/prometheus/', 'test_alerts.py')
+        for test in after_result:
+            if test['name'] not in failed_tests:
+                assert 'passed' in test['outcome'], \
+                    'Failed test {}'.format(test)
+        LOG.info("*************** DONE **************")
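+
+    # `check_influxdb_xfail` is presumably defined elsewhere on this class;
+    # conceptually it is an "expected failure" variant of the step-7 check.
+    # A hypothetical equivalent (name and body illustrative only):
+    @staticmethod
+    def _check_influxdb_xfail_sketch(sl_deployed, node, value):
+        import pytest
+        try:
+            assert value in sl_deployed.check_data_in_influxdb(node)
+        except AssertionError:
+            # Data posted while the service was down may never reach this
+            # node, so a miss is reported as xfail rather than a failure.
+            pytest.xfail('No {0} in influxdb on {1}'.format(value, node))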
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index b1adedc..ce9174f 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -26,6 +26,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.pike_ovs
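+    # This mark (and the other pike_ovs_* marks added below) lets CI select
+    # a single deployment suite, e.g. `py.test -m pike_ovs`. If strict
+    # marker checking is enabled, the marks would also need to be declared
+    # in the project's pytest configuration (pytest.ini / setup.cfg).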
     def test_mcp_pike_ovs_install(self, underlay,
                                   openstack_deployed,
                                   openstack_actions):
@@ -48,6 +49,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.pike_ovs_sl
     def test_mcp_pike_ovs_sl_install(self, underlay, config,
                                      openstack_deployed,
                                      sl_deployed):
@@ -82,6 +84,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.pike_ovs_dvr
     def test_mcp_pike_dvr_install(self,
                                   underlay,
                                   openstack_deployed,
@@ -104,6 +107,7 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
+    @pytest.mark.pike_ovs_dvr_sl
     def test_mcp_pike_dvr_sl_install(self, underlay, config,
                                      openstack_deployed,
                                      sl_deployed):
@@ -139,8 +143,76 @@
 
     @pytest.mark.grab_versions
     @pytest.mark.fail_snapshot
-    def test_mcp11_pike_dpdk_install(self, underlay, openstack_deployed,
-                                     show_step, openstack_actions):
+    def test_mcp_pike_dpdk_install(self, underlay, openstack_deployed,
+                                   show_step, openstack_actions):
+        """Test for deploying an mcp dpdk environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        """
+        LOG.info("*************** DONE **************")
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
+
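+    # The `_salt.local(...)` call above fans the NTP resync out to every
+    # minion; it is equivalent in spirit to running, on the salt master:
+    #
+    #     salt '*' cmd.run 'service ntp stop; ntpd -gq; service ntp start'
+    #
+    # i.e. stop the daemon, force a one-shot clock sync (`ntpd -gq`), then
+    # restart it, so tempest does not fail on clock skew between nodes.
+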
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.pike_cookied_ovs_sl
+    def test_mcp_pike_cookied_ovs_install(self, underlay,
+                                          openstack_deployed,
+                                          openstack_actions,
+                                          sl_deployed):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        4. Run tempest
+
+        """
+        openstack_actions._salt.local(
+                tgt='*', fun='cmd.run',
+                args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    @pytest.mark.pike_cookied_ovs_dvr_sl
+    def test_mcp_pike_cookied_dvr_install(self,
+                                          underlay,
+                                          openstack_deployed,
+                                          openstack_actions,
+                                          sl_deployed):
+        """Test for deploying an mcp environment and check it
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+
+        """
+        openstack_actions._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            openstack_actions.run_tempest(pattern=settings.PATTERN)
+            openstack_actions.download_tempest_report()
+        LOG.info("*************** DONE **************")
+
+    @pytest.mark.grab_versions
+    @pytest.mark.fail_snapshot
+    def test_mcp_pike_cookied_dpdk_install(self, underlay, openstack_deployed,
+                                           show_step, openstack_actions):
         """Test for deploying an mcp dpdk environment and check it
         Scenario:
         1. Prepare salt on hosts