Add the workflow for cookieshape-mcp-ocata-dvr-vxlan

The workflow 'cookieshape-mcp-ocata-dvr-vxlan' generates the cluster
model with specific options from the latest cookiecutter templates,
and then tries to deploy the environment using this model.
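
A typical invocation (a sketch; the exact pytest target depends on the
local tcp-tests setup) selects this template and inventory via
environment variables:

    export LAB_CONFIG_NAME=cookieshape-mcp-ocata-dvr-vxlan
    export ENVIRONMENT_MODEL_INVENTORY_NAME=virtual-devops-mcp-ocata-sl2
    export REPOSITORY_SUITE=testing
    export PATTERN=smoke  # tempest pattern; "false" keeps the image defaults
    py.test -vvv -s tcp_tests/tests/system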

Change-Id: Ibf72967e638938680fce9866eec4fd165e012d6e
Reviewed-on: https://review.gerrithub.io/375325
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
diff --git a/tcp_tests/environment/environment_inventory/virtual-devops-mcp-ocata-sl2.yaml b/tcp_tests/environment/environment_inventory/virtual-devops-mcp-ocata-sl2.yaml
new file mode 100644
index 0000000..71ee047
--- /dev/null
+++ b/tcp_tests/environment/environment_inventory/virtual-devops-mcp-ocata-sl2.yaml
@@ -0,0 +1,220 @@
+nodes:
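+    # Each entry maps a node FQDN to its reclass storage name, a list of
+    # roles, and per-interface roles; the environment template expands these
+    # into linux.network.interface and other per-role settings.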
+    ctl01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    ctl02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    ctl03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    dbs01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    dbs02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    dbs03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    msg01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    msg02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    msg03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    mdb01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_telemetry_node01
+      roles:
+      - openstack_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    mdb02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_telemetry_node02
+      roles:
+      - openstack_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    mdb03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_telemetry_node03
+      roles:
+      - openstack_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    prx01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    prx02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    mtr01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    mtr02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    mtr03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    log01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    log02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    log03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens4:
+          role: control_vcp_single
+
+    # Generator-based computes. For compatibility only
+    cmp000.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        #ens3:
+        #  role: management_single
+        ens4:
+          role: control_bond0
+        #TBD: ens5: tenant
+
+    gtw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        #ens3:
+        #  role: management_single
+        ens4:
+          role: control_bond0
+        #TBD: ens5: tenant
+        #TBD: ens6: external
+
+    gtw02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node02
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        #ens3:
+        #  role: management_single
+        ens4:
+          role: control_bond0
+        #TBD: ens5: tenant
+        #TBD: ens6: external
+
diff --git "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml" "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml"
index a5a83c5..600697e 100644
--- "a/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml"
+++ "b/tcp_tests/environment/environment_template/\173\173 cookiecutter._env_name \175\175/init.yml"
@@ -1,4 +1,6 @@
 parameters:
+  _param:
+    _esc: $
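+    # '_esc' provides a literal '$' so rendered values can contain unexpanded
+    # ${...} reclass references (assumption based on template usage).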
   reclass:
     storage:
       node:
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 1b961b2..1965750 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -18,30 +18,6 @@
         self.__underlay = underlay
         super(ExecuteCommandsMixin, self).__init__()
 
-    def ensure_running_service(self, service_name, host, check_cmd,
-                               state_running='start/running'):
-        """Check if the service_name running or try to restart it
-
-        :param node_name: node on which the service will be checked
-        :param check_cmd: shell command to ensure that the service is running
-        :param state_running: string for check the service state
-        """
-        cmd = "service {0} status | grep -q '{1}'".format(
-            service_name, state_running)
-        with self.__underlay.remote(host=host) as remote:
-            result = remote.execute(cmd)
-            if result.exit_code != 0:
-                LOG.info("{0} is not in running state on the node {1},"
-                         " trying to start".format(service_name, host))
-                cmd = ("service {0} stop;"
-                       " sleep 3; killall -9 {0};"
-                       "service {0} start; sleep 5;"
-                       .format(service_name))
-                remote.execute(cmd)
-
-                remote.execute(check_cmd)
-                remote.execute(check_cmd)
-
     def execute_commands(self, commands, label="Command"):
         """Execute a sequence of commands
 
@@ -141,19 +117,7 @@
                         " === RETRY ({0}/{1}) ======================="
                         .format(x - 1, retry_count))
                 else:
-                    if self.__config.salt.salt_master_host != '0.0.0.0':
-                        # Workarounds for crashed services
-                        self.ensure_running_service(
-                            "salt-master",
-                            self.__config.salt.salt_master_host,
-                            "salt-call pillar.items",
-                            'active (running)')  # Hardcoded for now
-                        self.ensure_running_service(
-                            "salt-minion",
-                            self.__config.salt.salt_master_host,
-                            "salt 'cfg01*' pillar.items",
-                            "active (running)")  # Hardcoded for now
-                        break
+                    break
 
                 if x == 1 and skip_fail is False:
                     # In the last retry iteration, raise an exception
@@ -229,14 +193,22 @@
         description = step.get('description', local_path)
         skip_fail = step.get('skip_fail', False)
 
-        if not local_path or not local_filename or not remote_path:
+        if not local_path or not remote_path:
             raise Exception("Step '{0}' failed: please specify 'local_path', "
                             "'local_filename' and 'remote_path' correctly"
                             .format(description))
 
+        if not local_filename:
+            # If local_filename is not specified, upload the whole directory
+            with self.__underlay.remote(node_name=node_name) as remote:
+                LOG.info("Uploading directory {0} to {1}:{2}"
+                         .format(local_path, node_name, remote_path))
+                remote.upload(source=local_path.rstrip(), target=remote_path.rstrip())
+                return
+
         result = {}
         with self.__underlay.local() as local:
-            result = local.execute('find {0} -type f -name {1}'
+            result = local.execute('cd {0} && find . -type f -name "{1}"'
                                    .format(local_path, local_filename))
             LOG.info("Found files to upload:\n{0}".format(result))
 
@@ -247,9 +219,11 @@
         with self.__underlay.remote(node_name=node_name) as remote:
             file_names = result['stdout']
             for file_name in file_names:
-                LOG.info("Uploading {0} to {1}:{2}"
-                         .format(local_path, node_name, file_name))
-                remote.upload(source=local_path, target=file_name.rstrip())
+                source_path = local_path + file_name.rstrip()
+                destination_path = remote_path.rstrip() + file_name.rstrip()
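+                # file_name comes from 'find .' and is relative ('./...'),
+                # so it is appended to both the local and remote roots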
+                LOG.info("Uploading file {0} to {1}:{2}"
+                         .format(source_path, node_name, remote_path))
+                remote.upload(source=source_path, target=destination_path)
 
     def action_download(self, step):
         """Download from environment node to local host
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 9013df9..86ef7b8 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -30,6 +30,8 @@
 
 _default_salt_steps = pkg_resources.resource_filename(
     __name__, 'templates/{0}/salt.yaml'.format(settings.LAB_CONFIG_NAME))
+_default_environment_template_dir = pkg_resources.resource_filename(
+    __name__, 'environment/')
 _default_common_services_steps = pkg_resources.resource_filename(
     __name__,
     'templates/{0}/common-services.yaml'.format(
@@ -118,6 +120,9 @@
     ct.Cfg('salt_steps_path', ct.String(),
            help="Path to YAML with steps to deploy salt",
            default=_default_salt_steps),
+    ct.Cfg('environment_template_dir', ct.String(),
+           help="Path to directory with Environment template and inventory",
+           default=_default_environment_template_dir),
 ]
 salt_opts = [
     ct.Cfg('salt_master_host', ct.IPAddress(),
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/common-services.yaml
new file mode 100644
index 0000000..0a80e82
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
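+    # The trailing 'grep -B1' both shows which minion holds the VIP and makes
+    # the step fail when no keepalived node has the address assigned.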
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/openstack.yaml
new file mode 100644
index 0000000..79ae1bc
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -0,0 +1,401 @@
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set PATTERN = os_env('PATTERN', 'smoke') %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install designate
+- description: Install powerdns
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'ctl*' state.sls powerdns
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install designate
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@designate:server' state.sls designate -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the doc) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Allow all tcp
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Allow all icmp
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround for PROD-13167
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
+    'apt-get install python-pymysql -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
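+# The echo feeds fdisk its scripted answers: create a new primary partition
+# with default bounds, then write the partition table.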
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 01
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 02
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 03
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create volume group
+  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install cinder-volume
+  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install crudini
+  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install docker.io on gtw
+  cmd: salt-call cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Run tests
+  cmd: |
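+    # PATTERN defaults to 'smoke' (set above via os_env); the literal string
+    # "false" runs the image's default test set, any other value is passed
+    # through to the runner as '--pattern'.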
+    if [[ "{{ PATTERN }}" == "false" ]]; then
+        salt-call cmd.run 'docker run --rm --net=host  -e TEMPEST_CONF=lvm_mcp.conf  -e SKIP_LIST=mcp_skip.list  -e SOURCE_FILE=keystonercv3 -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate  >> image.output'
+    else
+        salt-call cmd.run "docker run --rm --net=host  -e TEMPEST_CONF=lvm_mcp.conf  -e SKIP_LIST=mcp_skip.list  -e SOURCE_FILE=keystonercv3  -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate  >> image.output"
+    fi
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Download xml results
+  download:
+    remote_path: /root
+    remote_filename: "report_*.xml"
+    local_path: {{ os_env('PWD') }}
+  node_name: {{ HOSTNAME_GTW01 }}
+  skip_fail: true
+
+- description: Download html results
+  download:
+    remote_path: /root
+    remote_filename: "report_*.html"
+    local_path: {{ os_env('PWD') }}
+  node_name: {{ HOSTNAME_GTW01 }}
+  skip_fail: true
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/salt.yaml
new file mode 100644
index 0000000..47dfc5e
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/salt.yaml
@@ -0,0 +1,158 @@
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','virtual-devops-mcp-ocata-sl2') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+
+{#{ SHARED.MACRO_CLONE_RECLASS_MODELS() }#}
+- description: Create cluster model from cookiecutter templates
+  cmd: |
+    set -e;
+    pip install cookiecutter
+    export GIT_SSL_NO_VERIFY=true; git clone  https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates /tmp/cookiecutter-templates
+    mkdir -p /srv/salt/reclass/classes/cluster/
+    mkdir -p /srv/salt/reclass/classes/system/
+    mkdir -p /srv/salt/reclass/classes/service/
+    mkdir -p /srv/salt/reclass/nodes/
+
+    sed -i 's/cluster_name:.*/cluster_name: {{ LAB_CONFIG_NAME }}/g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/cluster_domain:.*/cluster_domain: {{ DOMAIN_NAME }}/g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+
+    # Replace firstly to an intermediate value to avoid intersection between
+    # already replaced and replacing networks.
+    # For example, if generated IPV4_NET_ADMIN_PREFIX=10.16.0 , then there is a risk of replacing twice:
+    # 192.168.10 -> 10.16.0 (generated network for admin)
+    # 10.16.0 -> <external network>
+    # So let's replace constant networks to the keywords, and then keywords to the desired networks.
+    sed -i 's/10\.167\.5\./==IPV4_NET_ADMIN_PREFIX==/g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/10\.167\.4\./==IPV4_NET_CONTROL_PREFIX==/g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/10\.167\.6\./==IPV4_NET_TENANT_PREFIX==/g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/172\.17\.16\./==IPV4_NET_EXTERNAL_PREFIX==/g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+
+    sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ SHARED.IPV4_NET_ADMIN_PREFIX }}./g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ SHARED.IPV4_NET_CONTROL_PREFIX }}./g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ SHARED.IPV4_NET_TENANT_PREFIX }}./g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+    sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ SHARED.IPV4_NET_EXTERNAL_PREFIX }}./g' /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml
+
+    for i in $(ls /tmp/cookiecutter-templates/cluster_product/); do
+      cookiecutter /tmp/cookiecutter-templates/cluster_product/$i \
+        --config-file /tmp/cookiecutter-templates/contexts/openstack_ovs_dvr_vxlan.yml \
+        --output-dir /srv/salt/reclass/classes/cluster/ \
+        --no-input -f;
+    done
+
+    export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/salt-models/reclass-system /srv/salt/reclass/classes/system/
+
+    # Create the cfg01 node and disable checking out the model from the remote repository
+    cat << 'EOF' >> /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml
+    classes:
+    - cluster.{{ LAB_CONFIG_NAME }}.infra.config
+    parameters:
+      _param:
+        linux_system_codename: xenial
+        reclass_data_revision: master
+      linux:
+        system:
+          name: cfg01
+          domain: {{ DOMAIN_NAME }}
+    # local storage
+      reclass:
+        storage:
+          data_source:
+            engine: local
+    EOF
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Modify cluster model for testing purposes
+  cmd: |
+    set -e;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+
+    # Enable 'root' user
+    reclass-tools add-key classes system.openssh.server.team.lab /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+
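+# FORMULA_SERVICES lists the salt formula packages made available to reclass
+# on the config node (see MACRO_CONFIGURE_RECLASS in shared-salt.yaml).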
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup"') }}
+
+
+- description: Upload 'environment' to {{ HOSTNAME_CFG01 }}
+  upload:
+    local_path: {{ config.salt_deploy.environment_template_dir }}
+    remote_path: /tmp/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: false
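+# 'local_filename' is omitted above, so the whole directory is uploaded
+# (see action_upload in tcp_tests/managers/execute_commands.py).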
+
+- description: "[EXPERIMENTAL] Create environment model for virtual environment"
+  cmd: |
+    set -e;
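+    # The quoted Jinja expressions below expand to literal cookiecutter-style
+    # directory names, so the links land inside the template's env directory.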
+    ln -s '/tmp/environment/environment_template/{{ '{# interfaces #}' }}' '/tmp/environment/environment_template/{{ '{{ cookiecutter._env_name }}' }}/';
+    ln -s '/tmp/environment/environment_template/{{ '{# roles #}' }}' '/tmp/environment/environment_template/{{ '{{ cookiecutter._env_name }}' }}/';
+    reclass-tools render --template-dir /tmp/environment/environment_template/ \
+                         --output-dir /srv/salt/reclass/classes/environment/ \
+                         --context /tmp/environment/environment_inventory/{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.yaml \
+                         --env-name {{ ENVIRONMENT_MODEL_INVENTORY_NAME }}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
+  cmd: |
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
+    reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
+    reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
+
+    if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then
+      reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml --merge ;
+    fi;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#- description: Hack gtw node
+#  cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: Hack cmp01 node
+#  cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: Hack cmp02 node
+#  cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/sl.yaml
new file mode 100644
index 0000000..d95e3d0
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/sl.yaml
@@ -0,0 +1,184 @@
+{% from 'cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install docker swarm
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure docker service
+  cmd: salt -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Refresh modules
+  cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun swarm on the master to ensure proper token population
+  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure slave nodes
+  cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: List registered Docker swarm nodes
+  cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+  cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters
+  cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure collector
+  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check influxdb
+  cmd: |
+    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Change environment configuration before deploy
+- description: Set SL docker images deploy parameters
+  cmd: |
+  {% for sl_opt, value in config.sl_deploy.items() %}
+    {% if value|string() %}
+    salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
+    {% endif %}
+  {% endfor %}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+  cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Run docker state
+  cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: docker ps
+  cmd: salt -C 'I@docker:swarm' dockerng.ps
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..905842d
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -0,0 +1,90 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup ens4
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44 >> /etc/resolv.conf;
+   - echo "nameserver 8.8.8.8 >> /etc/resolv.conf;
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - apt-get update
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
new file mode 100644
index 0000000..e6ceb1a
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
@@ -0,0 +1,81 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup ens4
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
+
diff --git a/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml
new file mode 100644
index 0000000..af1062f
--- /dev/null
+++ b/tcp_tests/templates/cookieshape-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -0,0 +1,625 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+# Additional hostnames referenced in the address pools below
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB = os_env('HOSTNAME_MDB', 'mdb.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW03 = os_env('HOSTNAME_GTW03', 'gtw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX = os_env('HOSTNAME_PRX', 'prx.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_BMK01 = os_env('HOSTNAME_BMK01', 'bmk01.' + DOMAIN_NAME) %}
+
+{% import 'cookieshape-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookieshape-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + '_' + os_env('BUILD_NUMBER', '')) }}
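+    # e.g. 'virtual-mcp-ocata-dvr_testing_42' for BUILD_NUMBER=42 when ENV_NAME is not set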
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
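+        # '10.60.0.0/16:24' allocates /24 subnets out of the 10.60.0.0/16 pool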
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL }}: +10
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_GTW01 }}: +224
+            default_{{ HOSTNAME_GTW02 }}: +225
+            default_{{ HOSTNAME_GTW03 }}: +226
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_PRX }}: +80
+            default_{{ HOSTNAME_PRX01 }}: +81
+            default_{{ HOSTNAME_PRX02 }}: +82
+            default_{{ HOSTNAME_MDB }}: +75
+            default_{{ HOSTNAME_MDB01 }}: +76
+            default_{{ HOSTNAME_MDB02 }}: +77
+            default_{{ HOSTNAME_MDB03 }}: +78
+            default_{{ HOSTNAME_BMK01 }}: +85
+
+            default_{{ HOSTNAME_LOG }}: +60
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MON }}: +70
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_MTR }}: +85
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+          ip_ranges:
+            dhcp: [+90, -10]
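+            # offsets are relative to the allocated subnet: in a /24 such as
+            # 10.60.0.0/24, '+90' resolves to 10.60.0.90 and '-10' counts back
+            # from the end of the subnet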
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL }}: +10
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_GTW01 }}: +224
+            default_{{ HOSTNAME_GTW02 }}: +225
+            default_{{ HOSTNAME_GTW03 }}: +226
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_PRX }}: +80
+            default_{{ HOSTNAME_PRX01 }}: +81
+            default_{{ HOSTNAME_PRX02 }}: +82
+            default_{{ HOSTNAME_MDB }}: +75
+            default_{{ HOSTNAME_MDB01 }}: +76
+            default_{{ HOSTNAME_MDB02 }}: +77
+            default_{{ HOSTNAME_MDB03 }}: +78
+            default_{{ HOSTNAME_BMK01 }}: +85
+
+            default_{{ HOSTNAME_LOG }}: +60
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MON }}: +70
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_MTR }}: +85
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL }}: +10
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_GTW01 }}: +224
+            default_{{ HOSTNAME_GTW02 }}: +225
+            default_{{ HOSTNAME_GTW03 }}: +226
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_PRX }}: +80
+            default_{{ HOSTNAME_PRX01 }}: +81
+            default_{{ HOSTNAME_PRX02 }}: +82
+            default_{{ HOSTNAME_MDB }}: +75
+            default_{{ HOSTNAME_MDB01 }}: +76
+            default_{{ HOSTNAME_MDB02 }}: +77
+            default_{{ HOSTNAME_MDB03 }}: +78
+            default_{{ HOSTNAME_BMK01 }}: +85
+
+            default_{{ HOSTNAME_LOG }}: +60
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MON }}: +70
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_MTR }}: +85
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL }}: +10
+            default_{{ HOSTNAME_CTL01 }}: +11
+            default_{{ HOSTNAME_CTL02 }}: +12
+            default_{{ HOSTNAME_CTL03 }}: +13
+            default_{{ HOSTNAME_DBS }}: +50
+            default_{{ HOSTNAME_DBS01 }}: +51
+            default_{{ HOSTNAME_DBS02 }}: +52
+            default_{{ HOSTNAME_DBS03 }}: +53
+            default_{{ HOSTNAME_GTW01 }}: +224
+            default_{{ HOSTNAME_GTW02 }}: +225
+            default_{{ HOSTNAME_GTW03 }}: +226
+            default_{{ HOSTNAME_MSG }}: +40
+            default_{{ HOSTNAME_MSG01 }}: +41
+            default_{{ HOSTNAME_MSG02 }}: +42
+            default_{{ HOSTNAME_MSG03 }}: +43
+            default_{{ HOSTNAME_PRX }}: +80
+            default_{{ HOSTNAME_PRX01 }}: +81
+            default_{{ HOSTNAME_PRX02 }}: +82
+            default_{{ HOSTNAME_MDB }}: +75
+            default_{{ HOSTNAME_MDB01 }}: +76
+            default_{{ HOSTNAME_MDB02 }}: +77
+            default_{{ HOSTNAME_MDB03 }}: +78
+            default_{{ HOSTNAME_BMK01 }}: +85
+
+            default_{{ HOSTNAME_LOG }}: +60
+            default_{{ HOSTNAME_LOG01 }}: +61
+            default_{{ HOSTNAME_LOG02 }}: +62
+            default_{{ HOSTNAME_LOG03 }}: +63
+            default_{{ HOSTNAME_MON }}: +70
+            default_{{ HOSTNAME_MON01 }}: +71
+            default_{{ HOSTNAME_MON02 }}: +72
+            default_{{ HOSTNAME_MON03 }}: +73
+            default_{{ HOSTNAME_MTR }}: +85
+            default_{{ HOSTNAME_MTR01 }}: +86
+            default_{{ HOSTNAME_MTR02 }}: +87
+            default_{{ HOSTNAME_MTR03 }}: +88
+
+            default_{{ HOSTNAME_CMP01 }}: +101
+            default_{{ HOSTNAME_CMP02 }}: +102
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: false
+            hpet: false
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+                                                 # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+           format: qcow2
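+           # IMAGE_PATH1604 must point to a local copy of one of the images
+           # above, e.g. (path is only an example):
+           #   export IMAGE_PATH1604=/tmp/xenial-server-cloudimg-amd64-disk1.img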
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
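+              # the '&interfaces' and '&network_config' anchors above are
+              # reused via '*interfaces' / '*network_config' on the other
+              # single-NIC nodes below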
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_MON03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
index dbf7236..c32f4e2 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/salt.yaml
@@ -41,21 +41,7 @@
 # Spin up Control Plane VMs on KVM nodes
 ########################################
 
-- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
-  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
-    "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
-    git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/12 && git checkout FETCH_HEAD;
-    cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
-    cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: '*Workaround: Load bonding module before call state.linux'
-  cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
 
 - description: Refresh pillars for present baremetal nodes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
@@ -70,14 +56,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: '*Workaround* install bridge-utils before running linux formula'
-  # The formula removes default route and then tries to install the package, fails.
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False  -C '* and not
-    cfg01*' cmd.run 'sudo apt-get install -y bridge-utils'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Sync all salt resources for present baremetal nodes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 2f6aad9..9d98bf6 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -413,3 +413,33 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 {%- endmacro %}
+
+
+{%- macro MACRO_NETWORKING_WORKAROUNDS() %}
+{#########################################}
+
+- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
+    "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
+    git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/13 && git checkout FETCH_HEAD;
+    cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
+    cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* Load bonding module before calling state.linux'
+  cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* install bridge-utils before running linux formula'
+  # The formula removes the default route and then fails while trying to install the package.
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+    cfg01*' cmd.run 'sudo apt-get install -y bridge-utils'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
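+{# Usage from a deployment template (import path assumed; see
+   physical_mcp11_ovs_dpdk/salt.yaml above for the call site):
+     {% import 'shared-salt.yaml' as SHARED with context %}
+     {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+#}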