Merge "Add model cookied-bm-contrail-nfv with MaaS"
diff --git a/tcp_tests/fixtures/day1_fixtures.py b/tcp_tests/fixtures/day1_fixtures.py
new file mode 100644
index 0000000..ff3a0b5
--- /dev/null
+++ b/tcp_tests/fixtures/day1_fixtures.py
@@ -0,0 +1,162 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from collections import namedtuple
+import pytest
+
+from tcp_tests.helpers import ext
+from tcp_tests import logger
+from tcp_tests.managers import saltmanager
+from tcp_tests.managers import underlay_ssh_manager
+
+LOG = logger.logger
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.day1_underlay)
+@pytest.fixture(scope="function")
+def day1_underlay(revert_snapshot, config, hardware):
+    """Fixture that should provide SSH access to underlay objects.
+
+    - Starts the 'hardware' environment and creates 'underlay' with required
+      configuration.
+    - Fills the following object using the 'hardware' fixture:
+      config.day1_underlay.ssh = JSONList of SSH access credentials for nodes.
+                                 This list is used to initialize the
+                                 UnderlaySSHManager model; see it for details.
+
+    :rtype UnderlaySSHManager: Object that encapsulates SSH credentials;
+                               - provides a list of underlay nodes;
+                               - provides SSH access to underlay nodes using
+                                 node names or node IPs.
+    """
+    # Create Underlay
+    if not config.day1_underlay.ssh:
+        # If config.day1_underlay.ssh wasn't provided from external config,
+        # then try to get necessary data from hardware manager (fuel-devops)
+
+        # for devops manager: power on nodes and wait for SSH
+        # for empty manager: do nothing
+        # for maas manager: provision nodes and wait for SSH
+        # hardware.start(underlay_node_roles=config.underlay.roles,
+        hardware.start(
+            underlay_node_roles=['salt_master'],
+            timeout=config.underlay.bootstrap_timeout)
+
+        config.day1_underlay.ssh = hardware.get_ssh_data(
+            roles=config.underlay.roles)
+
+        underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+        LOG.info("Generate MACs for MaaS")
+        macs = {
+            n.name.split('.')[0]: {
+                "interface": {
+                    "mac": n.get_interface_by_network_name('admin').mac_address}}  # noqa
+            for n in hardware.slave_nodes}
+
+        config.day1_cfg_config.maas_machines_macs = {
+            "parameters": {
+                "maas": {
+                    "region": {
+                        "machines": macs}}}}
+
+        if not config.day1_underlay.lvm:
+            underlay.enable_lvm(hardware.lvm_storages())
+            config.day1_underlay.lvm = underlay.config_lvm
+
+        hardware.create_snapshot(ext.SNAPSHOT.day1_underlay)
+
+    else:
+        # 1. hardware environment created and powered on
+        # 2. config.underlay.ssh contains SSH access to provisioned nodes
+        #    (can be passed from external config with TESTS_CONFIGS variable)
+        underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+    return underlay
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.cfg_configured)
+@pytest.fixture(scope='function')
+def day1_cfg_config(revert_snapshot, request, config, hardware, underlay,
+                    salt_actions, snapshot, grab_versions):
+    """Fixture to get or install cfg node from day1 image on environment
+
+    :param revert_snapshot: fixture that reverts snapshot that is specified
+                            in test with @pytest.mark.revert_snapshot(<name>)
+    :param request: fixture provides pytest data
+    :param config: fixture provides oslo.config
+    :param hardware: fixture provides enviromnet manager
+    :param day1_underlay: fixture provides underlay manager
+    :param salt_actions: fixture provides SaltManager instance
+    :rtype: SaltManager
+
+    If config.salt.salt_master_host is not set, this fixture assumes that
+    the salt was not installed, and do the following:
+    - install salt master and salt minions
+    - make snapshot with name 'cfg_configured'
+    - return SaltManager
+
+    If config.salt.salt_master_host was set, this fixture assumes that the
+    salt was already deployed, and do the following:
+    - return SaltManager instance
+
+    If you want to revert 'cfg_configured' snapshot, please use mark:
+    @pytest.mark.revert_snapshot("cfg_configured")
+    """
+    # Create Salt cluster
+    if config.salt.salt_master_host == '0.0.0.0':
+        # Temporary workaround. Underlay should be extended with roles
+        config.salt.salt_master_host = \
+            underlay.host_by_node_role(
+                node_role=ext.UNDERLAY_NODE_ROLES.salt_master)
+
+        commands = underlay.read_template(
+            config.day1_cfg_config.configure_steps_path)
+        LOG.info("############ Executing command ####### {0}".format(commands))
+        salt_actions.install(commands)
+
+        salt_nodes = salt_actions.get_ssh_data()
+        config.underlay.ssh = config.underlay.ssh + \
+            [node for node in salt_nodes
+             if not any(node['node_name'] == n['node_name']
+                        for n in config.underlay.ssh)]
+
+        hardware.create_snapshot(ext.SNAPSHOT.cfg_configured)
+        salt_actions.sync_time()
+
+    else:
+        # 1. hardware environment created and powered on
+        # 2. config.underlay.ssh contains SSH access to provisioned nodes
+        #    (can be passed from external config with TESTS_CONFIGS variable)
+        # 3. config.tcp.* options contain access credentials to the already
+        #    installed TCP API endpoint
+        pass
+
+    salt_actions.sync_time()
+
+    Collection = namedtuple(
+        'Collection', ['salt', 'underlay', 'config'], verbose=True)
+
+    return Collection(salt_actions, underlay, config)
+
+
+@pytest.fixture(scope='function')
+def day1_salt_actions(config, day1_underlay):
+    """Fixture that provides various actions for salt
+
+    :param config: fixture provides oslo.config
+    :param day1_underlay: fixture provides underlay manager
+    :rtype: SaltManager
+    """
+    return saltmanager.SaltManager(config, day1_underlay)
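
Note: a minimal sketch (not part of this change) of how a test could consume the new
day1 fixtures; the test module, class and assertion below are hypothetical:

    import pytest

    from tcp_tests.helpers import ext


    @pytest.mark.day1_underlay
    class TestDay1Provisioning(object):

        @pytest.mark.revert_snapshot(ext.SNAPSHOT.day1_underlay)
        def test_day1_fixtures_wiring(self, config, day1_underlay,
                                      day1_salt_actions):
            # day1_underlay exposes SSH access to the provisioned nodes;
            # day1_salt_actions wraps the same config in a SaltManager.
            assert config.day1_underlay.ssh
            assert day1_salt_actions is not None
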
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index d991548..9b4bed0 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -152,7 +152,7 @@
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.underlay)
 @pytest.fixture(scope="function")
-def underlay(revert_snapshot, config, hardware):
+def underlay(request, revert_snapshot, config, hardware):
     """Fixture that should provide SSH access to underlay objects.
 
     - Starts the 'hardware' environment and creates 'underlay' with required
@@ -168,7 +168,8 @@
                                  node names or node IPs.
     """
     # Create Underlay
-    if not config.underlay.ssh:
+
+    def basic_underlay():
         # If config.underlay.ssh wasn't provided from external config, then
         # try to get necessary data from hardware manager (fuel-devops)
 
@@ -189,6 +190,53 @@
 
         hardware.create_snapshot(ext.SNAPSHOT.underlay)
 
+        return underlay
+
+    def day1_underlay():
+        hardware.start(
+            underlay_node_roles=['salt_master'],
+            timeout=config.underlay.bootstrap_timeout)
+
+        config.underlay.ssh = hardware.get_ssh_data(
+            roles=config.underlay.roles)
+
+        underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+        LOG.info("Generate MACs for MaaS")
+        macs = {
+            n.name.split('.')[0]: {
+                "interface": {
+                    "mac": n.get_interface_by_network_name('admin').mac_address},  # noqa
+                "power_parameters": {
+                    "power_address": "{}:{}".format(
+                        n.get_interface_by_network_name('admin').l2_network_device.address_pool.get_ip('l2_network_device'),  # noqa
+                        n.bmc_port
+                        )}} for n in hardware.slave_nodes}
+
+        config.day1_cfg_config.maas_machines_macs = {
+            "parameters": {
+                "maas": {
+                    "region": {
+                        "machines": macs}}}}
+
+        if not config.underlay.lvm:
+            underlay.enable_lvm(hardware.lvm_storages())
+            config.underlay.lvm = underlay.config_lvm
+
+        for node in hardware.slave_nodes:
+            # For correct commissioning by MaaS, nodes should be powered off
+            node.destroy()
+
+        hardware.create_snapshot(ext.SNAPSHOT.underlay)
+
+        return underlay
+
+    if not config.underlay.ssh:
+        if request.node.get_marker('day1_underlay'):
+            underlay = day1_underlay()
+        else:
+            underlay = basic_underlay()
+
     else:
         # 1. hardware environment created and powered on
         # 2. config.underlay.ssh contains SSH access to provisioned nodes
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 7999201..800367d 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -53,6 +53,8 @@
     'k8s_deployed',
     'decapod_deployed',
     'ceph_deployed',
+    'day1_underlay',
+    'cfg_configured',
 )
 
 
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 1e7f66c..d17c6bd 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -25,12 +25,12 @@
     AuthenticationException,
     BadAuthenticationType)
 
-from tcp_tests.helpers import env_config
-from tcp_tests.helpers import exceptions
-from tcp_tests.helpers import ext
-from tcp_tests import logger
 from tcp_tests import settings
 from tcp_tests import settings_oslo
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import ext
+from tcp_tests.helpers import exceptions
+from tcp_tests import logger
 
 LOG = logger.logger
 
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index 474713c..8c9939f 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -1,6 +1,8 @@
+from __future__ import print_function
 import time
 
 import jenkins
+import requests
 
 from devops.helpers import helpers
 
@@ -53,10 +55,22 @@
         build_id = self.job_info(name)['lastBuild']['number']
         return name, build_id
 
-    def wait_end_of_build(self, name, build_id, timeout=600):
+    def wait_end_of_build(self, name, build_id, timeout=600,
+                          print_job_output=False):
+        # Mutable offset so the nested 'building' closure can advance it
+        start = [0]
 
         def building():
-            return not self.build_info(name, build_id)['building']
+            status = not self.build_info(name, build_id)['building']
+            if print_job_output:
+                res = self.get_progressive_build_output(name,
+                                                        build_id,
+                                                        start=start[0])
+                if 'X-Text-Size' in res.headers:
+                    text_size = int(res.headers['X-Text-Size'])
+                    if start[0] < text_size:
+                        print(res.content, end='')
+                        start[0] = text_size
+            return status
 
         helpers.wait(
             building,
@@ -66,3 +80,20 @@
 
     def get_build_output(self, name, build_id):
         return self.__client.get_build_console_output(name, build_id)
+
+    def get_progressive_build_output(self, name, build_id, start=0,
+                                     raise_on_err=False):
+        '''Get build console text.
+
+        :param name: Job name, ``str``
+        :param build_id: Build id, ``int``
+        :param start: Start offset, ``int``
+        :returns: requests response with headers and console output, ``obj``
+        '''
+        folder_url, short_name = self.__client._get_job_folder(name)
+
+        PROGRESSIVE_CONSOLE_OUTPUT = (
+            '%(folder_url)sjob/%(short_name)s/%(build_id)d/'
+            'logText/progressiveHtml?start=%(start)d')
+        url = self.__client._build_url(PROGRESSIVE_CONSOLE_OUTPUT, locals())
+        return requests.get(url)
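
Note: a short usage sketch for the progressive console polling added above (assumes an
already constructed Jenkins client instance from this module; the job name and build id
are illustrative):

    # Stream the console log to stdout while waiting for the build to finish.
    client.wait_end_of_build('deploy-day1-lab', build_id=42, timeout=3600,
                             print_job_output=True)
    # The raw progressive chunk can also be fetched directly:
    res = client.get_progressive_build_output('deploy-day1-lab', 42, start=0)
    print(res.headers.get('X-Text-Size'), len(res.content))
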
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 2b06dc3..b5d5f04 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -19,8 +19,8 @@
 from datetime import datetime
 from pepper import libpepper
 from tcp_tests.helpers import utils
-from tcp_tests import settings
 from tcp_tests import logger
+from tcp_tests import settings
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
 
 LOG = logger.logger
@@ -234,7 +234,7 @@
         self.__api = None
         self.run_state(
             tgt,
-            'cmd.run', 'service ntp stop; ntpd -gq; service ntp start')
+            'cmd.run', 'service ntp stop; if [ -x /usr/sbin/ntpdate ]; then ntpdate -s ntp.ubuntu.com; else ntpd -gq ; fi; service ntp start')  # noqa
         new_time_res = self.run_state(tgt, 'cmd.run', 'date')
         for node_name, time in sorted(new_time_res[0]['return'][0].items()):
             LOG.info("{0}: {1}".format(node_name, time))
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 2c9ed55..5f919ce 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -236,7 +236,8 @@
                                    address_pool=address_pool)
         return ssh_data['host']
 
-    def remote(self, node_name=None, host=None, address_pool=None):
+    def remote(self, node_name=None, host=None, address_pool=None,
+               username=None):
         """Get SSHClient by a node name or hostname.
 
            One of the following arguments should be specified:
@@ -249,7 +250,7 @@
         ssh_data = self.__ssh_data(node_name=node_name, host=host,
                                    address_pool=address_pool)
         ssh_auth = ssh_client.SSHAuth(
-            username=ssh_data['login'],
+            username=username or ssh_data['login'],
             password=ssh_data['password'],
             keys=[rsakey.RSAKey(file_obj=StringIO.StringIO(key))
                   for key in ssh_data['keys']])
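
Note: a minimal sketch of the new 'username' override (assumes an UnderlaySSHManager
instance named 'underlay'; the node name and command are illustrative):

    # Falls back to the login stored in config.underlay.ssh when 'username'
    # is not passed.
    remote = underlay.remote(node_name='cfg01.cookied-bm-contrail-maas.local',
                             username='root')
    remote.check_call('salt-key -L')
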
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 64769ed..3d1b4d2 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -11,7 +11,7 @@
 docker-compose==1.7.1
 urllib3
 junit-xml
-jinja2>=2.1
+jinja2>=2.9
 jira
 testrail<=0.3.8
 functools32
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index f90000c..24c6c0b 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -59,3 +59,4 @@
 
 PATTERN = os.environ.get('PATTERN', None)
 RUN_TEMPEST = get_var_as_bool('RUN_TEMPEST', False)
+RUN_SL_TESTS = get_var_as_bool('RUN_SL_TESTS', False)
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 062e421..8ba9f4d 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -67,7 +67,13 @@
     __name__, 'templates/{0}/k8s.yaml'.format(
         settings.LAB_CONFIG_NAME))
 _default_net_mgm = os.environ.get("NET_MGMT", "admin-pool01")
-
+_default_configure_steps = pkg_resources.resource_filename(
+    __name__, 'templates/{0}/cfg01_configure.yaml'.format(
+        settings.LAB_CONFIG_NAME))
+# _default_cluster_maas_config = pkg_resources.resource_filename(
+#     __name__, 'templates/{0}/cluster_infra_maas.yml'.format(
+#         settings.LAB_CONFIG_NAME))
+_default_cluster_maas_config = 'cluster_infra_maas.yml'
 
 hardware_opts = [
     ct.Cfg('manager', ct.String(),
@@ -352,6 +358,38 @@
                    'kubernetes/k8s-conformance:v1.8.13-11')
 ]
 
+day1_cfg_config_opts = [
+    ct.Cfg('configure_steps_path', ct.String(),
+           help="Path to YAML with steps to config cfg01 node",
+           default=_default_configure_steps),
+    ct.Cfg('environment_template_dir', ct.String(),
+           help="Path to directory with Environment template and inventory",
+           default=_default_environment_template_dir),
+    ct.Cfg('templates_dir', ct.String(),
+           help="Path to directory with templates",
+           default=_default_templates_dir),
+    ct.Cfg('cluster_maas_config', ct.String(),
+           help="Path to maas class yaml file for cfg node",
+           default=_default_cluster_maas_config),
+    ct.Cfg('maas_machines_macs', ct.JSONDict(),
+           help="""MAC of machines interfaces for maas config:
+                  'parameters': {
+                    'maas' : {
+                      'region' : {
+                        'machines': {
+                          'ctl01': {
+                            'interface': {
+                              'mac': 'aa:bb:cc:dd:ee:ff'
+                            }
+                          }
+                          '...': {
+                            'interface': {
+                              'mac': 'aa:bb:cc:dd:ee:ff'
+                            }
+                          }
+                  }}}}""", default={}),
+]
+
 _group_opts = [
     ('hardware', hardware_opts),
     ('underlay', underlay_opts),
@@ -374,6 +412,8 @@
     ('ceph_deploy', ceph_deploy_opts),
     ('k8s_deploy', k8s_deploy_opts),
     ('k8s', k8s_opts),
+    ('day1_cfg_config', day1_cfg_config_opts),
+    ('day1_underlay', underlay_opts),
 ]
 
 
@@ -472,6 +512,14 @@
                      help=""))
     config.register_opts(group='ceph_deploy', opts=ceph_deploy_opts)
 
+    config.register_group(cfg.OptGroup(name='day1_cfg_config',
+                          title="Day01 config node configuration", help=""))
+    config.register_opts(group='day1_cfg_config', opts=day1_cfg_config_opts)
+
+    config.register_group(cfg.OptGroup(name='day1_underlay',
+                          title="Day01 underlay configuration", help=""))
+    config.register_opts(group='day1_underlay', opts=underlay_opts)
+
     return config
 
 
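
Note: for reference, the value that the day1 underlay fixture places into
config.day1_cfg_config.maas_machines_macs looks roughly like this (hostname, MAC and
power address are illustrative; 'power_parameters' is only filled by the devops-based
day1 branch):

    maas_machines_macs = {
        'parameters': {
            'maas': {
                'region': {
                    'machines': {
                        'kvm01': {
                            'interface': {'mac': '0c:c4:7a:33:1f:e4'},
                            'power_parameters': {
                                'power_address': '172.16.49.66:6001'}}}}}}
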
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml
new file mode 100644
index 0000000..0aebf89
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..69a9df0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/lab04-physical-inventory.yaml
@@ -0,0 +1,77 @@
+nodes:
+    cfg01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+    # Physical nodes
+
+    kvm01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp001.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_compute_node01
+      roles:
+      - openstack_compute
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f1:
+          role: single_mgm
+          deploy_address: 172.16.49.73
+        enp5s0f0:
+          role: bond0_ab_contrail
+          tenant_address: 192.168.0.101
+        enp5s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.101
+
+    cmp002.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_compute_node02
+      roles:
+      - openstack_compute
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f1:
+          role: single_mgm
+          deploy_address: 172.16.49.74
+        enp5s0f0:
+          role: bond0_ab_contrail
+          tenant_address: 192.168.0.102
+        enp5s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.102
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml b/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml
new file mode 100644
index 0000000..ed0191c
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/maas.yml
@@ -0,0 +1,111 @@
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.maas
+- system.maas.region.single
+parameters:
+  _param:
+    maas_region_port: 5240
+    maas_cluster_region_port: ${_param:maas_region_port}
+    power_user: mcp-qa
+    power_password: password
+  maas:
+    cluster:
+      region:
+        host: ${_param:deploy_address}:${_param:maas_cluster_region_port}
+    region:
+      bind:
+        host: ${_param:deploy_address}:${_param:maas_region_port}
+      subnets:
+        172.16.49.64/26:
+          cidr: 172.16.49.64/26
+          fabric: fabric-51
+          gateway_ip: 172.16.49.65
+          iprange:
+            end: 172.16.49.119
+            start: 172.16.49.77
+      fabrics:
+        fabric-51:
+          description: Fabric for deploy
+      #commissioning_scripts:
+      #  00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
+      machines:
+        kvm01: # cz7341-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          # pxe_interface_mac:
+          pxe_interface_mac: "0c:c4:7a:33:1f:e4"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:33:1f:e4"
+              mode: "static"
+              ip: ${_param:infra_kvm_node01_deploy_address}
+              subnet: "10.10.0.0/16" # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+          power_parameters:
+            power_address: "185.8.59.161"
+            power_password: ${_param:power_password}
+            power_type: ipmi
+            power_user: ${_param:power_user}
+        kvm02: # #cz7342-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:33:20:fc"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:33:20:fc"
+              mode: "static"
+              ip: ${_param:infra_kvm_node02_deploy_address}
+              subnet: "10.10.0.0/16" # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+          power_parameters:
+            power_address: "185.8.59.162"
+            power_password: ${_param:power_password}
+            power_type: ipmi
+            power_user: ${_param:power_user}
+        kvm03: # #cz7343-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:31:fb:b6"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:31:fb:b6"
+              mode: "static"
+              ip: ${_param:infra_kvm_node03_deploy_address}
+              subnet: "10.10.0.0/16" # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+          power_parameters:
+            power_address: "185.8.59.163"
+            power_password: ${_param:power_password}
+            power_type: ipmi
+            power_user: ${_param:power_user}
+        cmp001: # #cz7345-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:31:f0:12"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:31:f0:12"
+              mode: "static"
+              ip: ${_param:infra_kvm_node04_deploy_address}
+              subnet: "10.10.0.0/16" # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+          power_parameters:
+            power_address: "185.8.59.17"
+            power_password: ${_param:power_password}
+            power_type: ipmi
+            power_user: ${_param:power_user}
+        cmp002: # cz7346-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          pxe_interface_mac: "0c:c4:7a:31:ef:bc"
+          interfaces:
+            one1:
+              mac: "0c:c4:7a:31:ef:bc"
+              mode: "static"
+              ip: ${_param:infra_kvm_node05_deploy_address}
+              subnet: "10.10.0.0/16" # create it manually... in UI
+              gateway: ${_param:deploy_network_gateway}
+          power_parameters:
+            power_address: "185.8.59.18"
+            power_password: ${_param:power_password}
+            power_type: ipmi
+            power_user: ${_param:power_user}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
new file mode 100644
index 0000000..0fed1c0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/openstack.yaml
@@ -0,0 +1,294 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install cinder volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:volume' state.sls cinder
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# install contrail
+- description: Install Opencontrail db on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail db on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database' state.sls opencontrail.database
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 20}
+  skip_fail: false
+
+- description: Install Opencontrail control on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail control on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail on collector
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Workaround for https://mirantis.jira.com/browse/PROD-12798
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' service.restart 'keepalived'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# OpenContrail vrouters
+- description: Install Opencontrail client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail client on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Install Opencontrail on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:compute' state.sls 'opencontrail'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Test Opencontrail
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@opencontrail:control' cmd.run 'contrail-status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+  # Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install docker.io on ctl
+  cmd: salt "ctl01*" cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Enable forward policy
+  cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Hack vrouter (delete default mount point)
+  cmd: salt "cmp*" cmd.run "sed -i 's/exit 0//g' /etc/rc.local; echo 'umount /dev/hugepages; service supervisor-vrouter restart' >> /etc/rc.local; echo 'exit 0' >> /etc/rc.local"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Temporary workaround to set the correct PCI address in vrouter.conf
+  cmd: salt  "cmp*" cmd.run "sed -i 's/physical\_interface\_address\=.*/physical\_interface\_address=0000\:05\:00\.0/g' /etc/contrail/contrail-vrouter-agent.conf"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Remove crash files from /var/crashes/ left over from vrouter crashes
+  cmd: salt  "cmp*" cmd.run "rm -rf /var/crashes/*"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
+
+- description: Reboot computes
+  cmd: salt --timeout=600  "cmp*" system.reboot
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml b/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+  nova:
+    controller:
+      policy:
+        context_is_admin: 'role:admin or role:administrator'
+        'compute:create': 'rule:admin_or_owner'
+        'compute:create:attach_network':
+  cinder:
+    controller:
+      policy:
+        'volume:delete': 'rule:admin_or_owner'
+        'volume:extend':
+  neutron:
+    server:
+      policy:
+        create_subnet: 'rule:admin_or_network_owner'
+        'get_network:queue_id': 'rule:admin_only'
+        'create_network:shared':
+  glance:
+    server:
+      policy:
+        publicize_image: "role:admin"
+        add_member:
+  keystone:
+    server:
+      policy:
+        admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+  heat:
+    server:
+      policy:
+        context_is_admin: 'role:admin and is_admin_project:True'
+        deny_stack_user: 'not role:heat_stack_user'
+        deny_everybody: '!'
+        'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+        'cloudformation:DescribeStackResources':
+  ceilometer:
+    server:
+      policy:
+        segregation: 'rule:context_is_admin'
+        'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..e053de3
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,202 @@
+default_context:
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEA1Hbf7nJ3VGyRxqwPNqnDcspyuJXf0WwuEJJyDxATV0JTuZSz
+    jcT4A1XLN/WG8diN0Q5/tYRpuSxNKaz3nPxXjTqK1byrB7jGBhNQeWYHMNoBH3VR
+    Kzm4yBGIKaG6k5wYB5kS950zxNHIKQ7Mo4+0WloIlgZTkMrQo98FaD1X9jyVJm7O
+    TfIqTwYSJb4TJf/hgL8xntpGC6gRjZDQzlxB25bSP2+u3KzfUyDiaK8hs5PWwnNn
+    KfvZmYZmsgA4+D+dQh4YkyfDN6hQL4ttW+2SRpZ/js+KTkqU1Vyn5P5cbf2RwH0G
+    VewWyYA2ZK7nXPYH7+ia1rPj6wO3UXEFnxVzowIDAQABAoIBAQDCgenURFrWoWZ7
+    ub1bz+MszgZk0mDLOvyZE1j0TUVHl2NK/MW8vlxHwV2AZ3kZI6YBhAKKzSR08Afc
+    ZYty3tnQY44CyuzQ7unrWfdMjIl8wbhRcnfS2M8/6jz70CIdTMP7ALqKkhJ4140l
+    eXUDMGZuaQp/Pl92qRaFT4GFwtMqivQobCX5/yehd3+mMu8CkK+1U0T/9gWacEdv
+    JISAkfpOGkXLmZ/ekkqNFfv6SrNaefaYjqMeGk3ZrFmPAEstqxo0tiUjV5BYXCME
+    SsJCF/EnDHxMYzwkqCCGrXY0DbXGqF1B+dO8MqeYTIYyohEJ82vkezP5BqM4L/3Z
+    Lec3ypPhAoGBAPh0eBKNgLOhMz+h1DnjdTJAcsa58ZQCH6r+pTVIhwmbCT/3RTJT
+    oizYVhtlQyi0lHsxMwtYx+oj1Qacs7jb6UH24d6+oX0JZr/lfxqZVvgmTOoNED3l
+    ZdX3xU4GOMhWXS7IEhk750LFGF1k7QLcoULx3u/8dZNgW0kNZUsptkxpAoGBANrq
+    m9CFnbzSm20EicfT8FX7Hu6Wl3Lgwnsgc+7+dkm8uST+DdDWjUymkTjeIq/8Z/va
+    I+rhURp+r7tLFAy+sC6YeLZ7oHcUIfUwXOdlIJOpdVayF5oiZ5qCSkNcT2oJfFdJ
+    Uf3/gwQDxg6CKJIKdnyK1njnfldfoFLZz9Z1Ze4rAoGBAMhfde7Qe/liiihJZRUC
+    oiPS4j3u/Ct3wv5uu+JLCczvYfhafU3nMSWlm1wgwJb1e8IWnaoLAb+NAmKAwljV
+    0jrG1saDS03B5UHh3i4feIpMqT8hJfYlKYn0dwVD80tui1wNMrtzGkE5HztDB/qE
+    4PFSi49UNaaT0UsLKKQDkefxAoGAf91+iwowOuTsoX2AGHajLyVRSNwus4uyLIal
+    EJgScTlJDuFRIoTe3UGBGy0sJ4yPE9yzE/LtE0Oh0wykNll+wIiQIU4OSN86gmLw
+    MLuxjm3xOmUlQgMMboPhanzVacMGnFkYCfqfBM5LdZfyqHJyCIZzhQT5l4EkPKA6
+    NDI4CicCgYAtaC63kRjv8NNWo1iuovOLF4pdnYEviT37s24zlf18IKLZTW66AGbX
+    2lLHBiS7SyLEvIpn8/Vwh185CbTitlfRpU5bPzwk4dvKPgUa0eiov/Voe+WPWafQ
+    +uErJ3mt7l+dThL1q70aD6Dl1pbMjG5xbIKSmXNrmrrMVN8+pM2BJQ==
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUdt/ucndUbJHGrA82qcNyynK4ld/RbC4QknIPEBNXQlO5lLONxPgDVcs39Ybx2I3RDn+1hGm5LE0prPec/FeNOorVvKsHuMYGE1B5Zgcw2gEfdVErObjIEYgpobqTnBgHmRL3nTPE0cgpDsyjj7RaWgiWBlOQytCj3wVoPVf2PJUmbs5N8ipPBhIlvhMl/+GAvzGe2kYLqBGNkNDOXEHbltI/b67crN9TIOJoryGzk9bCc2cp+9mZhmayADj4P51CHhiTJ8M3qFAvi21b7ZJGln+Oz4pOSpTVXKfk/lxt/ZHAfQZV7BbJgDZkrudc9gfv6JrWs+PrA7dRcQWfFXOj
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: cookied-bm-contrail-maas.local
+  cluster_name: cookied-bm-contrail-maas
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: ADkYzQUNzQcfbBH3McX2MLVK7EkgliUBuVRvaw6e4pFcRtbkawgc9FTHFaw1L5Eh
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2422'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.65
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 172.18.208.44
+  dns_server02: 8.8.4.4
+  email_address: sgudz@mirantis.com
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.16.49.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.16.49.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.16.49.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_deploy_cidr: 172.16.49.64/26
+  maas_deploy_gateway: 172.16.49.65
+  maas_deploy_range_end: 172.16.49.119
+  maas_deploy_range_start: 172.16.49.77
+  maas_deploy_vlan: '0'
+  maas_dhcp_enabled: 'True'
+  maas_fabric_name: fabric-51
+  maas_hostname: cfg01
+  maas_manage_deploy_network: 'True'
+  mcp_common_scripts_branch: ''
+  mcp_version: proposed
+  offline_deployment: 'False'
+  opencontrail_analytics_address: 10.167.8.30
+  opencontrail_analytics_hostname: nal
+  opencontrail_analytics_node01_address: 10.167.8.31
+  opencontrail_analytics_node01_hostname: nal01
+  opencontrail_analytics_node02_address: 10.167.8.32
+  opencontrail_analytics_node02_hostname: nal02
+  opencontrail_analytics_node03_address: 10.167.8.33
+  opencontrail_analytics_node03_hostname: nal03
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_control_address: 10.167.8.20
+  opencontrail_control_hostname: ntw
+  opencontrail_control_node01_address: 10.167.8.21
+  opencontrail_control_node01_hostname: ntw01
+  opencontrail_control_node02_address: 10.167.8.22
+  opencontrail_control_node02_hostname: ntw02
+  opencontrail_control_node03_address: 10.167.8.23
+  opencontrail_control_node03_hostname: ntw03
+  opencontrail_enabled: 'True'
+  opencontrail_router01_address: 10.167.8.100
+  opencontrail_router01_hostname: rtr01
+  opencontrail_router02_address: 10.167.8.101
+  opencontrail_router02_hostname: rtr02
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.167.8.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.8
+  openstack_compute_rack01_tenant_subnet: 192.168.0
+  openstack_control_address: 10.167.8.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.8.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.8.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.8.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.8.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.8.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.8.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.8.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_message_queue_address: 10.167.8.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.8.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.8.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.8.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: opencontrail
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_proxy_address: 10.167.8.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.8.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.8.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.8.19
+  openstack_version: ocata
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_webhook_app_id: '24'
+  oss_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: 981k0rv2KevPhpyyy3BgZK8cNZWUjifx
+  salt_api_password_hash: $6$kpjwqhVv$gUQV0XYxXNUu3ESKSmE1s.eDAaYunerIsF3DdzjvMqCRiH7DdOWuun/pdjSVp.jjKHYsb0GimXyUh6sX/77PM/
+  salt_master_address: 172.16.49.66
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.66
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 10.167.8.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.167.8.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.167.8.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.167.8.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: influxdb
+  stacklight_monitor_address: 10.167.8.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.167.8.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.167.8.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.167.8.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.167.8.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.167.8.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.167.8.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.167.8.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2423'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml
new file mode 100644
index 0000000..1b560f4
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml
@@ -0,0 +1,245 @@
+nodes:
+    # Virtual Control Plane nodes
+
+    ctl01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mon03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    nal01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: opencontrail_analytics_node01
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    nal02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: opencontrail_analytics_node02
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    nal03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: opencontrail_analytics_node03
+      roles:
+      - opencontrail_analytics
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    ntw01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: opencontrail_control_node01
+      roles:
+      - opencontrail_control
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    ntw02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: opencontrail_control_node02
+      roles:
+      - opencontrail_control
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    ntw03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: opencontrail_control_node03
+      roles:
+      - opencontrail_control
+      - linux_system_codename_trusty
+      interfaces:
+        eth1:
+          role: single_ctl
+
+    mtr01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_telemetry_node01
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_telemetry_node02
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    mtr03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_telemetry_node03
+      roles:
+      - stacklight_telemetry
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log01.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_log_node01
+      roles:
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log02.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_log_node02
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    log03.cookied-bm-contrail-maas.local:
+      reclass_storage_name: stacklight_log_node03
+      roles:
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+#    bmk01.cookied-bm-contrail-maas.local:
+#      reclass_storage_name: openstack_benchmark_node01
+#      roles:
+#      - openstack_benchmark
+#      - linux_system_codename_xenial
+#      interfaces:
+#        ens3:
+#          role: single_ctl

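Note: the environment context above is plain YAML keyed by node FQDN; each entry only names the
reclass storage class, roles and interfaces for that node. A minimal sketch of how such a file can
be inspected (assumes PyYAML is installed; the path is the file added by this change):

    import yaml  # pip install pyyaml

    with open("tcp_tests/templates/cookied-bm-contrail-maas/salt-context-environment.yaml") as f:
        env = yaml.safe_load(f)

    # Print which reclass storage node each VCP host maps to, with its roles and interfaces.
    for fqdn, node in sorted(env["nodes"].items()):
        roles = ", ".join(node.get("roles", []))
        ifaces = ", ".join(node.get("interfaces", {}))
        print("{:<45} -> {} [{}] via {}".format(
            fqdn, node["reclass_storage_name"], roles, ifaces))
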
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
new file mode 100644
index 0000000..2888a65
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
@@ -0,0 +1,231 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import REPOSITORY_SUITE with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import ETH1_IP_ADDRESS_CFG01 with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_START with context %}
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import MAAS_DHCP_POOL_END with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail-maas') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: Upload maas template
+  upload:
+    local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: maas.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: "Workaround for rack01 compute generator"
+  cmd: |
+    set -e;
+    # Remove rack01 key
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    # Add openstack_compute_node definition from system
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "WR for changing image to proposed"
+  cmd: |
+    set -e;
+    # Set the salt_control VM image URLs to the mcp{{ REPOSITORY_SUITE }} images
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcp{{ REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Temporary WR for correct bridge name according to environment templates
+  cmd: |
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update minion information
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun openssh after env model is generated
+  cmd: |
+    salt-call state.sls openssh
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure rsyslog on nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure maas.cluster
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.cluster
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure maas.region
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls maas.region
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: Configure dhcp for fabric
+  cmd: |
+    touch /root/API_KEY_FILE;
+    export PROFILE=mirantis;
+    export API_KEY_FILE=/root/API_KEY_FILE;
+    export MAAS_URL=http://{{ ETH1_IP_ADDRESS_CFG01 }}:5240/MAAS;
+    maas-region apikey --username=$PROFILE > $API_KEY_FILE;
+    maas login $PROFILE $MAAS_URL - < $API_KEY_FILE;
+    maas $PROFILE ipranges create type=dynamic start_ip={{ MAAS_DHCP_POOL_START }} end_ip={{ MAAS_DHCP_POOL_END }} comment='Reserved dynamic range for HW and VCP nodes'
+    maas $PROFILE ipranges create type=reserved  start_ip={{ ETH1_IP_ADDRESS_CFG01 }} end_ip={{ ETH1_IP_ADDRESS_CFG01 }} comment='This is a reserved IP for cfg with maas node';
+    maas $PROFILE vlan update 51 0 dhcp_on=True primary_rack=cfg01;
+    maas $PROFILE nodes read |grep status -A 1 -B 1;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Commissioning nodes
+  cmd: |
+    salt-call state.apply maas.machines;
+    salt-call state.apply maas.machines.wait_for_ready;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Assign IPs
+  cmd: |
+    salt-call state.sls maas.machines.assign_ip;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Deploying BM nodes
+  cmd: |
+    salt-call maas.deploy_machines || true
+    salt-call state.apply maas.machines.wait_for_deployed;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure ntp on all nodes
+  cmd: |
+    salt '*' state.sls ntp;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+########################################
+# Spin up Control Plane VMs on KVM nodes
+########################################
+
+- description: Execute 'libvirt' states to create necessary libvirt networks
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Syncing before salt control state
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Create VMs for control plane
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: '*Workaround* for waiting until the control-plane VMs appear in salt-key (instead of sleep)'
+  cmd: |
+    salt-key -l acc| sort > /tmp/current_keys.txt &&
+    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 20, delay: 30}
+  skip_fail: false
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
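Note: the commissioning and deploy steps above wait via the maas salt states; a rough standalone
sketch of the same polling through the MaaS CLI (assumes the 'mirantis' profile was logged in as in
the "Configure dhcp for fabric" step; status field names vary slightly between MaaS releases, hence
the fallbacks):

    import json
    import subprocess
    import time

    PROFILE = "mirantis"  # same profile that was logged in above

    def nodes_summary():
        # 'maas <profile> nodes read' prints a JSON list with one entry per machine
        out = subprocess.check_output(["maas", PROFILE, "nodes", "read"])
        nodes = json.loads(out.decode())
        return {n.get("hostname", "?"): n.get("status_name",
                                               n.get("substatus_name", n.get("status")))
                for n in nodes}

    def wait_for(expected, timeout=3600, interval=60):
        # Poll until every machine reports the expected status, e.g. 'Ready' or 'Deployed'
        deadline = time.time() + timeout
        while time.time() < deadline:
            summary = nodes_summary()
            print(summary)
            if summary and all(status == expected for status in summary.values()):
                return
            time.sleep(interval)
        raise TimeoutError("machines did not reach %r within %s seconds" % (expected, timeout))

    if __name__ == "__main__":
        wait_for("Ready")       # after 'maas.machines' commissioning
        # wait_for("Deployed")  # after 'maas.deploy_machines'
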
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml
new file mode 100644
index 0000000..4748f8a
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/sl.yaml
@@ -0,0 +1,243 @@
+{% from 'cookied-bm-contrail-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install docker swarm
+- description: Configure docker service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Refresh modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: List registered Docker swarm nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install slv2 infra
+#Launch containers
+- description: launch prometheus containers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Check docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Install telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check influxdb
+  cmd: |
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install Prometheus LTS (optional if set in model)
+- description: Prometheus LTS (optional if set in model)
+  cmd: |
+    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+    if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+  cmd: |
+    FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+    else
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+#Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+  cmd: |
+    CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Ceilometer service presence: ${CEILO}";
+    if [[ "$CEILO" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 15}
+  skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install sphinx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+
+#- description: Install prometheus alertmanager
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+#- description: run docker state
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+#
+#- description: docker ps
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 10}
+#  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
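Note: several steps above share one guard: probe a compound target with test.ping and apply the
state only if something answers. A small sketch of that pattern outside the YAML templates
(assumes it runs on cfg01 where the salt CLI is available, Python 3.7+ for capture_output):

    import json
    import subprocess

    def targets_exist(compound):
        # test.ping returns per-minion booleans; an empty result means nothing matched
        out = subprocess.run(
            ["salt", "--out=json", "--static", "-C", compound, "test.ping"],
            capture_output=True, text=True)
        try:
            return bool(json.loads(out.stdout or "{}"))
        except ValueError:
            return False

    def apply_if_present(compound, state):
        if targets_exist(compound):
            subprocess.check_call(["salt", "-C", compound, "state.sls", state])
        else:
            print("no minions match %r, skipping state %r" % (compound, state))

    apply_if_present("I@influxdb:server", "influxdb")
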
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..1e318a5
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
@@ -0,0 +1,69 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   - sudo route add default gw {gateway} ens3
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+
+   - export MAAS_PXE_INTERFACE_NAME=ens4
+   - export MAAS_PXE_INTERFACE_ADDRESS={{ os_env('MAAS_PXE_INTERFACE_ADDRESS', '172.16.49.66') }}
+   - export MAAS_DHCP_POOL_NETMASK={{ os_env('MAAS_DHCP_POOL_NETMASK', '255.255.255.192') }}
+   - export MAAS_DHCP_POOL_NETMASK_PREFIX={{ os_env('MAAS_DHCP_POOL_NETMASK_PREFIX', '26') }}
+   - export MAAS_DHCP_POOL_START={{ os_env('MAAS_DHCP_POOL_START', '172.16.49.77') }}
+   - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
+   - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
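Note: the cloud-init user-data above is itself a Jinja template: config.underlay.ssh_keys and
os_env() are resolved when the underlay is rendered, while plain {hostname}/{gateway} placeholders
are substituted later. A toy rendering sketch (assumes jinja2; the fragment and stub objects are
illustrative only, not the real config classes):

    import os
    import jinja2

    fragment = """\
    ssh_authorized_keys:
    {% for key in config.underlay.ssh_keys %}
     - ssh-rsa {{ key['public'] }}
    {% endfor %}
    pxe_address: {{ os_env('MAAS_PXE_INTERFACE_ADDRESS', '172.16.49.66') }}
    """

    class Underlay:          # stand-in for the real config object
        ssh_keys = [{'public': 'AAAA...example'}]

    class Config:
        underlay = Underlay()

    # os.environ.get matches the os_env(name, default) call signature used in the templates
    print(jinja2.Template(fragment).render(config=Config(), os_env=os.environ.get))
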
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
new file mode 100644
index 0000000..cdab801
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay.yaml
@@ -0,0 +1,132 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-bm-contrail-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-contrail-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-contrail-maas') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+
+{% set MAAS_DHCP_POOL_START = os_env('MAAS_DHCP_POOL_START', '172.16.49.77') %}
+{% set MAAS_DHCP_POOL_END = os_env('MAAS_DHCP_POOL_END', '172.16.49.119') %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-bm-contrail-maas_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('MAAS_ADMIN_ADDRESS_POOL01', '10.50.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +2
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      provisioning-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +61
+            l2_network_device: +61
+            default_{{ HOSTNAME_CFG01 }}: +2
+            virtual_{{ HOSTNAME_CFG01 }}: +2
+          ip_ranges:
+            dhcp: [+12, +55]
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          provisioning: provisioning-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          provisioning:
+            address_pool: provisioning-pool01
+            dhcp: false
+            forward:
+              mode: !os_env MAAS_PXE_IFACE_MODE, bridge
+            parent_iface:
+              phys_dev: !os_env MAAS_PXE_IFACE
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 180
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # for store image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+                - label: ens4
+                  l2_network_device: provisioning
+                  interface_model: *interface_model
+
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - provisioning
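Note: in the address_pools section the '+N'/'-N' entries are offsets inside the pool network, not
literal addresses. A quick sketch of the usual interpretation (assumption: positive offsets count
from the network address, negative ones from the broadcast address; with the defaults above, +2
lands on 172.16.49.66, matching ETH1_IP_ADDRESS_CFG01):

    import ipaddress

    net = ipaddress.ip_network("172.16.49.64/26")   # provisioning-pool01 default

    def resolve(offset):
        # '+N' counts from the network address, '-N' from the broadcast address
        if offset >= 0:
            return net.network_address + offset
        return net.broadcast_address + offset

    print("gateway / l2_network_device:", resolve(61))            # 172.16.49.125
    print("default cfg01:              ", resolve(2))             # 172.16.49.66
    print("dhcp range:                 ", resolve(12), "-", resolve(55))
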
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
index 06ff674..f2a0907 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/sl.yaml
@@ -66,6 +66,13 @@
   skip_fail: false
 
 # Install slv2 infra
+# Install MongoDB for alerta
+- description: Install MongoDB
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index b34225f..bb65f49 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -66,6 +66,13 @@
   skip_fail: false
 
 # Install slv2 infra
+# Install MongoDB for alerta
+- description: Install MongoDB
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 - description: Install telegraf
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
index a23b74c..e7c1175 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
@@ -68,6 +68,13 @@
   skip_fail: false
 
 # Install slv2 infra
+# Install MongoDB for alerta
+- description: Install MongoDB
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 #Launch containers
 - description: launch prometheus containers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index f3b8414..4c8e787 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -149,6 +149,68 @@
   skip_fail: false
 {%- endmacro %}
 
+
+{%- macro MACRO_CONFIG_DAY01_SALT_MASTER() %}
+{######################################}
+
+- description: Remove /etc/update-motd.d/52-info
+  cmd: rm -vf /etc/update-motd.d/52-info
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Set up static interface config
+  cmd: |
+    kill $(pidof /sbin/dhclient) || /bin/true
+    cat << 'EOF' > /etc/network/interfaces
+    # This file describes the network interfaces available on your system
+    # and how to activate them. For more information, see interfaces(5).
+
+    # The loopback network interface
+    auto lo
+    iface lo inet loopback
+
+    # The primary network interface
+    auto ens3
+    iface ens3 inet static
+      address {{ IPV4_NET_ADMIN_PREFIX }}.90
+      netmask 255.255.255.0
+      gateway {{ IPV4_NET_ADMIN_PREFIX }}.1
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install common packages on cfg01
+  cmd: eatmydata apt-get update && apt-get install -y python-pip git curl at tmux byobu iputils-ping traceroute htop tree wget jq ntpdate
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Remove any existing minion keys
+  cmd:  salt-key -y -D || true
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure GIT settings and certificates
+  cmd: |
+    set -e;
+    #touch /root/.git_trusted_certs.pem;
+    #for server in github.com; do \
+    #    openssl s_client -showcerts -connect $server:443 </dev/null \
+    #    | openssl x509 -outform PEM \
+    #    >> /root/.git_trusted_certs.pem;
+    #done;
+    #HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+    HOME=/root git config --global user.email "mcp-integration-qa@example.com";
+    HOME=/root git config --global user.name "MCP Integration QA";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
 {%- macro MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=false) %}
 {############################################################}
 {# Creates a 'cluster' model from cookiecutter-templates and 'environment' model from uploaded template #}
@@ -223,6 +285,8 @@
     [ -f ${CFG01_INVENTORY_FILE} ] || cat << 'EOF' > ${CFG01_INVENTORY_FILE}
     classes:
     - cluster.{{ CLUSTER_NAME }}.infra.config
+    - cluster.{{ CLUSTER_NAME }}.infra.maas
+    - cluster.{{ CLUSTER_NAME }}.infra.maas-machines
     parameters:
       _param:
         linux_system_codename: xenial
@@ -565,6 +629,66 @@
 {%- endmacro %}
 
 
+{%- macro MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='') %}
+{#######################################################}
+- description: Install salt formulas and link their service classes into reclass
+  cmd: |
+    set -e;
+    FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+    which wget > /dev/null || (apt-get update; apt-get install -y wget);
+    . /etc/lsb-release;  # Get DISTRIB_CODENAME variable
+    # echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
+    # wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
+    apt-get clean; apt-get update;
+    [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+    declare -a formula_services=({{ FORMULA_SERVICES }});
+    echo -e "\nInstalling all required salt formulas\n";
+    eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
+    for formula_service in "${formula_services[@]}"; do
+      echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+      [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+    done;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Show reclass-salt --top for cfg01 node
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart salt-master service
+  cmd: systemctl restart salt-master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_CONFIG_DAY01_SALT_MINION() %}
+{#######################################}
+- description: Configure salt-minion on {{ HOSTNAME_CFG01 }}
+  cmd: |
+    export SALT_MASTER_MINION_ID={{ HOSTNAME_CFG01 }}
+    envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
+    service salt-minion restart
+
+    while true; do
+     salt-key | grep "$SALT_MASTER_MINION_ID" && break
+     sleep 5
+    done
+
+    sleep 5
+
+    for i in `salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"`; do
+     salt-key -d $i -y
+    done
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
 {%- macro MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() %}
 {##################################################}
 
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
new file mode 100644
index 0000000..e2ba165
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -0,0 +1,154 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_enabled: 'False'
+  cluster_domain: virtual-mcp-pike-dvr.local
+  cluster_name: virtual-mcp-pike-dvr
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 172.16.10.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 192.168.10.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 192.168.10.0/24
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: ddmitriev@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 172.16.10.101
+  infra_kvm01_deploy_address: 192.168.10.101
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 172.16.10.102
+  infra_kvm02_deploy_address: 192.168.10.102
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 172.16.10.103
+  infra_kvm03_deploy_address: 192.168.10.103
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 172.16.10.100
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 192.168.10.90
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openstack_benchmark_node01_address: 172.16.10.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 172.16.10
+  openstack_compute_rack01_tenant_subnet: 10.1.0
+  openstack_control_address: 172.16.10.100
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 172.16.10.101
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 172.16.10.102
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 172.16.10.103
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 172.16.10.100
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 172.16.10.101
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 172.16.10.102
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 172.16.10.103
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 172.16.10.110
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.1.0.6
+  openstack_gateway_node02_address: 172.16.10.111
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.1.0.7
+  openstack_gateway_node03_address: 172.16.10.112
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.1.0.8
+  openstack_message_queue_address: 172.16.10.100
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 172.16.10.101
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 172.16.10.102
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 172.16.10.103
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 172.16.10.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 172.16.10.121
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 172.16.10.122
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 172.16.10.19
+  openstack_version: pike
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_app_id: '24'
+  oss_notification_sender_password: password
+  oss_notification_smtp_port: '587'
+  oss_notification_webhook_login_id: '13'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+  salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+  salt_master_address: 172.16.10.90
+  salt_master_hostname: cfg01
+  salt_master_management_address: 192.168.10.90
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_log_address: 172.16.10.70
+  stacklight_log_hostname: mon
+  stacklight_log_node01_address: 172.16.10.107
+  stacklight_log_node01_hostname: mon01
+  stacklight_log_node02_address: 172.16.10.108
+  stacklight_log_node02_hostname: mon02
+  stacklight_log_node03_address: 172.16.10.109
+  stacklight_log_node03_hostname: mon03
+  stacklight_monitor_address: 172.16.10.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 172.16.10.107
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 172.16.10.108
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 172.16.10.109
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_notification_address: alerts@localhost
+  stacklight_notification_smtp_host: 127.0.0.1
+  stacklight_telemetry_address: 172.16.10.70
+  stacklight_telemetry_hostname: mon
+  stacklight_telemetry_node01_address: 172.16.10.107
+  stacklight_telemetry_node01_hostname: mon01
+  stacklight_telemetry_node02_address: 172.16.10.108
+  stacklight_telemetry_node02_hostname: mon02
+  stacklight_telemetry_node03_address: 172.16.10.109
+  stacklight_telemetry_node03_hostname: mon03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.1.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.1.0.0/24
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'False'
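Note: a cookiecutter context like the one above is flat key/value YAML, so basic sanity checks are
easy to script. A minimal sketch (assumes PyYAML; purely illustrative) that flags any *_address
value falling outside the declared subnets:

    import ipaddress
    import yaml

    path = "tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml"
    with open(path) as f:
        ctx = yaml.safe_load(f)["default_context"]

    subnets = [ipaddress.ip_network(ctx[k]) for k in
               ("control_network_subnet", "deploy_network_subnet", "tenant_network_subnet")]

    for key, value in sorted(ctx.items()):
        if not key.endswith("_address"):
            continue
        try:
            ip = ipaddress.ip_address(value)
        except ValueError:
            continue  # skip reclass interpolations like ${_param:...} and non-IP values
        if not any(ip in net for net in subnets):
            print("outside declared subnets:", key, value)
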
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml
new file mode 100644
index 0000000..0127547
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+    cfg01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - features_designate_pool_manager_keystone
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_designate_pool_manager_database
+      - features_designate_pool_manager
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node01
+      roles:
+      - stacklightv2_server_leader
+      - stacklight_telemetry_leader
+      - stacklight_log_leader_v2
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node02
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    mon03.mcp11-ovs-dpdk.local:
+      reclass_storage_name: stacklight_server_node03
+      roles:
+      - stacklightv2_server
+      - stacklight_telemetry
+      - stacklight_log
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    # Generator-based computes. For compatibility only
+    cmp<<count>>.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_compute_rack01
+      roles:
+      - openstack_compute
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    gtw01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+
+    dns01.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_dns_node01
+      roles:
+      - features_designate_pool_manager_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+          single_address: ${_param:openstack_dns_node01_address}
+
+    dns02.mcp11-ovs-dpdk.local:
+      reclass_storage_name: openstack_dns_node02
+      roles:
+      - features_designate_pool_manager_dns
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+          single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
new file mode 100644
index 0000000..6f11a9a
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
@@ -0,0 +1,146 @@
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_CONFIG_DAY01_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: Prepare SSH known_hosts for the jenkins user
+  cmd: |
+   mkdir -p /var/lib/jenkins/.ssh && \
+   ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && \
+   chown jenkins /var/lib/jenkins/.ssh/known_hosts
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: Upload maas config
+  upload:
+    local_path: {{ config.day1_cfg_config.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: {{ config.day1_cfg_config.cluster_maas_config }}
+    remote_path: /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: Rename maas config
+  cmd: mv -v /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/{{ config.day1_cfg_config.cluster_maas_config }} /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/maas.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
+
+- description: Save the generated machine MAC addresses
+  cmd: |
+    echo -n '{{ config.day1_cfg_config.maas_machines_macs | tojson }}' | \
+    python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)' > /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/maas-machines.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: False
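+# The step above converts the JSON blob rendered from
+# config.day1_cfg_config.maas_machines_macs into plain YAML so that reclass
+# can include it. As a rough illustration (hypothetical MAC values), the
+# resulting maas-machines.yml might look like:
+#
+#   parameters:
+#     maas:
+#       region:
+#         machines:
+#           ctl01:
+#             interface:
+#               mac: "52:54:00:aa:bb:01"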
+
+{#
+{{ SHARED.MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='"fluentd"') }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+#}
+
+{{ SHARED.MACRO_CONFIG_DAY01_SALT_MINION() }}
+
+- description: Fix config for Jenkins
+  cmd: |
+    export SALT_MASTER_MINION_ID={{ HOSTNAME_CFG01 }}
+    find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Setup pipeline libraries
+  cmd: |
+    export PIPELINE_REPO_URL=https://github.com/Mirantis
+    git clone --mirror $PIPELINE_REPO_URL/mk-pipelines.git /home/repo/mk/mk-pipelines/
+    git clone --mirror $PIPELINE_REPO_URL/pipeline-library.git /home/repo/mcp-ci/pipeline-library/
+    chown -R git:www-data /home/repo/mk/mk-pipelines/*
+    chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Refresh pillars before generating nodes
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure network, linux, openssh and salt on cfg01 node
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls linux.network,linux,openssh,salt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+#- description: Restart MaaS services before running the state (needed to prevent MaaS from getting stuck)
+#  cmd: systemctl restart maas-regiond && systemctl restart maas-rackd
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+- description: Configure MaaS (cluster) on cfg01 node
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls maas.cluster
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure MaaS (region) on cfg01 node
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls maas.region
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Configure reclass on cfg01 node
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls reclass
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure jenkins on cfg01 node
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls jenkins.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# - description: Hack gtw node
+#   cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
+
+# - description: Hack cmp01 node
+#   cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
+
+# - description: Hack cmp02 node
+#   cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+#   node_name: {{ HOSTNAME_CFG01 }}
+#   retry: {count: 1, delay: 10}
+#   skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml
new file mode 100644
index 0000000..56394da
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml
@@ -0,0 +1,147 @@
+---
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.maas
+- system.maas.region.single
+- service.jenkins.client
+- system.jenkins.client.credential.salt
+- system.jenkins.client.job.deploy.openstack
+- cluster.virtual-mcp-pike-dvr.infra
+parameters:
+  _param:
+    maas_admin_username: mirantis
+    maas_admin_password: r00tme
+    maas_db_password: fRqC7NJrBR0x
+    dns_server01: 8.8.8.8
+    maas_region_port: 5240
+    maas_cluster_region_port: ${_param:maas_region_port}
+    infra_config_deploy_address: ${_param:reclass_config_master}
+    jenkins_git_url: 'git@cfg01:/home/repo'
+    jenkins_gerrit_url: ${_param:jenkins_git_url}
+    salt_api_password: hovno12345!
+    jenkins_salt_api_url: "http://${_param:reclass_config_master}:6969"
+    jenkins_pipeline_library_url: git@cfg01:/home/repo/mcp-ci/pipeline-library
+    jenkins_pipelines_branch: master
+  jenkins:
+    client:
+      lib:
+        pipeline-library:
+          url: ${_param:jenkins_pipeline_library_url}
+          branch: ${_param:jenkins_pipelines_branch}
+      master:
+        host: ${_param:reclass_config_master}
+        port: 8081
+        password: r00tme
+  maas:
+    region:
+      commissioning_scripts:
+        00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
+      bind:
+        host: ${_param:reclass_config_master}:${_param:maas_region_port}
+        port: 5240
+      maas_config:
+        main_archive: http://mirror.mirantis.com/${_param:apt_mk_version}/ubuntu/
+        disk_erase_with_secure_erase: false
+      machines:
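+        # NOTE: the MAC addresses and power_address values below are left
+        # commented out on purpose; in this virtual lab they are expected to
+        # be merged in from the generated infra/maas-machines.yml, which is
+        # written out by cfg01_configure.yaml from the generated MACs.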
+        ctl01: # cz7341-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+          #   mac: "0c:c4:7a:33:1f:e4"
+            # ip: ${_param:infra_kvm_node01_deploy_address}
+            # subnet: "deploy_network" # create it manually... in UI
+            # gateway: ${_param:deploy_network_gateway}
+          power_parameters:
+            # power_address: "185.8.59.161"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        ctl02: # #cz7342-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+          #   mac: "0c:c4:7a:33:20:fc"
+          #   # ip: ${_param:infra_kvm_node02_deploy_address}
+          power_parameters:
+            # power_address: "185.8.59.162"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        ctl03: # #cz7343-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+            # mac: "0c:c4:7a:31:fb:b6"
+            # ip: ${_param:infra_kvm_node03_deploy_address}
+          power_parameters:
+            # power_address: "185.8.59.163"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        gtw01: # #cz7055-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+            # mac: "00:25:90:e3:3b:26"
+            # ip: ${_param:infra_kvm_node06_deploy_address}
+          power_parameters:
+            # power_address: "176.74.222.106"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        cmp01: # cz7054-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+            # mac: "00:25:90:e3:37:2e"
+          power_parameters:
+            # power_address: "176.74.222.104"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        cmp02: #cz7056-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+          #   mac: "00:25:90:e3:3f:2a"
+          power_parameters:
+            # power_address: "176.74.222.108"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        dns01: #cz7056-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+          #   mac: "00:25:90:e3:3f:2a"
+          power_parameters:
+            # power_address: "176.74.222.108"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        dns02: #cz7056-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+          #   mac: "00:25:90:e3:3f:2a"
+          power_parameters:
+            # power_address: "176.74.222.108"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+        prx01: #cz7056-kvm.host-telecom.com
+          distro_series: "xenial"
+          # hwe_kernel: "hwe-16.04"
+          interface:
+          #   mac: "00:25:90:e3:3f:2a"
+          power_parameters:
+            # power_address: "176.74.222.108"
+            power_password: "r00tme"
+            power_type: ipmi
+            power_user: admin
+
+    cluster:
+      region:
+        host: ${_param:reclass_config_master}:${_param:maas_cluster_region_port}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml
new file mode 100644
index 0000000..965d297
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Check the VIP
+  cmd: |
+    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup glusterfs on primary controller
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check the gluster status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check the rabbitmq status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check mysql status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install memcached on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
new file mode 100644
index 0000000..45ededb
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
@@ -0,0 +1,395 @@
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: overrides-policy.yml
+    remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+  cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+  cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+    ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
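+# A sketch of the result of the steps above (only applied when
+# OVERRIDE_POLICY is set): overrides-policy.yml gets a class include
+# prepended and is then symlinked in place of control.yml, roughly:
+#
+#   classes:
+#   - cluster.<LAB_CONFIG_NAME>.openstack.control_orig
+#   parameters:
+#     nova:
+#       controller:
+#         policy:
+#           ...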
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 15}
+  skip_fail: false
+
+- description: Restart apache due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Check apache status due to PROD-10477
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Install designate
+- description: Install powerdns
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@powerdns:server' state.sls powerdns.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install designate
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@designate:server' state.sls designate -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply formulas for compute node (as per the docs)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
+
+
+  # Upload cirros image
+
+- description: Upload cirros image on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 30}
+  skip_fail: false
+
+- description: Register image in glance
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_external
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron net-create net04'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create subnet_net04
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create router
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-create net04_router01'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set gateway
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description:  Add interface
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+    '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+#- description:  Allow all tcp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+#
+#- description:  Allow all icmp
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: false
+
+- description: sync time
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+    'service ntp stop; ntpd -gq;  service ntp start'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
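+# The piped keystrokes above appear to drive fdisk non-interactively:
+# "n" starts a new partition, "p" selects primary, the empty lines accept
+# the default partition number and first/last sectors, and "w" writes the
+# partition table, yielding /dev/vdb1 for the pvcreate steps below.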
+
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 01
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 02
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Create partitions 03
+  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: create volume_group
+  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install cinder-volume
+  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install crudini
+  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 01
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 02
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL02 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 03
+  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+  node_name: {{ HOSTNAME_CTL03 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Install docker.io on gtw
+  cmd: salt-call cmd.run 'apt-get install docker.io -y'
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Restart cinder volume
+  cmd: |
+    salt -C 'I@cinder:controller' service.restart cinder-volume;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+- description: create rc file on cfg
+  cmd: scp ctl01:/root/keystonercv3 /root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
+
+- description: Copy rc file
+  cmd: scp /root/keystonercv3 gtw01:/root
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml
@@ -0,0 +1,40 @@
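+# Policy overrides layered on top of the generated OpenStack control classes
+# (see openstack.yaml). Keys with an explicit value replace the default rule;
+# keys left without a value (e.g. 'compute:create:attach_network') are
+# presumably meant to drop the corresponding rule from the rendered policy.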
+parameters:
+  nova:
+    controller:
+      policy:
+        context_is_admin: 'role:admin or role:administrator'
+        'compute:create': 'rule:admin_or_owner'
+        'compute:create:attach_network':
+  cinder:
+    controller:
+      policy:
+        'volume:delete': 'rule:admin_or_owner'
+        'volume:extend':
+  neutron:
+    server:
+      policy:
+        create_subnet: 'rule:admin_or_network_owner'
+        'get_network:queue_id': 'rule:admin_only'
+        'create_network:shared':
+  glance:
+    server:
+      policy:
+        publicize_image: "role:admin"
+        add_member:
+  keystone:
+    server:
+      policy:
+        admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+  heat:
+    server:
+      policy:
+        context_is_admin: 'role:admin and is_admin_project:True'
+        deny_stack_user: 'not role:heat_stack_user'
+        deny_everybody: '!'
+        'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+        'cloudformation:DescribeStackResources':
+  ceilometer:
+    server:
+      policy:
+        segregation: 'rule:context_is_admin'
+        'telemetry:get_resource':
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
new file mode 100644
index 0000000..b3818b7
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
@@ -0,0 +1,177 @@
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+# Install docker swarm
+- description: Install keepalived on mon nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'mon*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check the VIP on mon nodes
+  cmd: |
+    SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+    echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure docker service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install docker swarm on master node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Refresh modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Rerun swarm on master for proper token population on slaves
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  Configure slave nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description:  List registered Docker swarm nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+  cmd: |
+    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure fluentd
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install elasticsearch client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install kibana client
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Check influxdb
+  cmd: |
+    INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+        salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+    fi
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Sync modules
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Update mine
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 15}
+  skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure Prometheus and the Heka remote collector in Docker Swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: run docker state
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: docker ps
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+  cmd: sleep 30;  salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
+- description: Run salt minion to create cert files
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{  SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{  SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..be74a88
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
@@ -0,0 +1,67 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sudo ifdown ens3
+   - rm /etc/network/interfaces
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifdown ens3 || true
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   - echo "nameserver 172.18.176.6" >> /etc/resolv.conf;
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet static
+          address {address}
+          netmask {netmask}
+          gateway {gateway}
+          dns-nameservers 172.18.176.6
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml
new file mode 100644
index 0000000..aa5cbb5
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml
@@ -0,0 +1,512 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
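+# The '&name' entries above define YAML anchors that are dereferenced later
+# in this file with '*name': every node interface reuses *interface_model,
+# and the cfg01 'iso' volume reuses *cloudinit_meta_data and
+# *cloudinit_user_data_cfg01.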
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
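+          # The '+N' / '-N' values appear to be offsets relative to the start
+          # and end of the pool network: e.g. '+100' reserves the 100th
+          # address for cfg01, while 'dhcp: [+90, -10]' leaves a DHCP range
+          # from the 90th address up to 10 addresses before the end.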
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +90
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_DNS01 }}: +111
+            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used for 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         # - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+         #   source_image: !os_env MCP_IMAGE_PATH1604
+         #   format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+              bmc_port: 41623
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for store image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+              bmc_port: 41624
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for store image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+              bmc_port: 41625
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+              bmc_port: 41626
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+              bmc_port: 41630
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: cloudimage1604
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+              bmc_port: 41631
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 3
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: cloudimage1604
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+              bmc_port: 41632
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: cloudimage1604
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+              bmc_port: 41633
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_DNS01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+              bmc_port: 41634
+              bmc_network: admin
+
+          - name: {{ HOSTNAME_DNS02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  # backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for storing the image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_1604
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+              bmc_port: 41635
+              bmc_network: admin
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
index 57c3eee..5f9f3e6 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
@@ -67,6 +67,13 @@
   skip_fail: false
 
 # Install slv2 infra
+# Install MongoDB for Alerta
+- description: Install MongoDB
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 #Launch containers
 - description: launch prometheus containers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
index 07f156b..23df071 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
@@ -67,6 +67,13 @@
   skip_fail: false
 
 # Install slv2 infra
+# Install MongoDB for Alerta
+- description: Install MongoDB
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 10}
+  skip_fail: false
+
 #Launch containers
 - description: launch prometheus containers
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 080cb4d..1214cd3 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -26,6 +26,8 @@
 from tcp_tests.fixtures.stacklight_fixtures import *  # noqa
 from tcp_tests.fixtures.k8s_fixtures import *  # noqa
 from tcp_tests.fixtures.drivetrain_fixtures import *  # noqa
+from tcp_tests.fixtures.day1_fixtures import *  # noqa
+
 
 __all__ = sorted([  # sort for documentation
     # common_fixtures
@@ -69,5 +71,8 @@
     'ceph_action',
     # k8s fixtures
     'k8s_actions',
-    'k8s_deployed'
+    'k8s_deployed',
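+    # day1 fixtures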
+    'day1_underlay',
+    'day1_cfg_config',
+    'day1_salt_action'
 ])
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 51757fd..83f3766 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -11,7 +11,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-# import pytest
+import pytest
 import time
 
 from collections import Counter
@@ -28,6 +28,139 @@
 class TestOfflineDeployment(object):
     """docstring for TestOfflineDeployment"""
 
+    @pytest.mark.day1_underlay
+    def test_maas_provision(self, show_step, hardware, underlay,
+                            day1_cfg_config):
+        """Test for deploying an mcp dvr environment and check it
+
+        Scenario:
+        1. Prepare salt on hosts
+        2. Setup controller nodes
+        3. Setup compute nodes
+        """
+
+        show_step(1)
+        show_step(2)
+        show_step(3)
+
+        cfg_node = 'cfg01.virtual-mcp-pike-dvr.local'
+        ssh_test_key = day1_cfg_config.config.underlay.ssh_keys[0]['public']
+        verbose = True
+
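+        # Locate the 'admin' network of the master node; its address pool is
+        # used below to define the DHCP range for MAAS.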
+        cfg_admin_iface = next(i for i in hardware.master_nodes[0].interfaces
+                               if i.network.name == 'admin')
+        admin_net = cfg_admin_iface.network.address_pool.ip_network
+
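+        # Log the 'mirantis' MAAS CLI profile out and back in against the
+        # local API with the region API key, so the 'maas mirantis ...'
+        # commands below can reach the API.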
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='maas logout mirantis && '
+            'maas login mirantis '
+            'http://localhost:5240/MAAS/api/2.0/ '
+            'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN')  # noqa
+
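+        # Disable the 'backports' and 'security' pockets of the main archive
+        # repository that MAAS uses when provisioning nodes.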
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="maas mirantis package-repository update main_archive "
+                "disabled_pockets=backports,security")
+
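+        # Reserve a dynamic IP range (addresses 191..253 of the admin network)
+        # that MAAS will hand out over DHCP during commissioning.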
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd="maas mirantis ipranges create "
+            "type=dynamic start_ip={start} end_ip={end} "
+            "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"{net}\") | .id')".format(  # noqa
+                start=admin_net[191],
+                end=admin_net[253],
+                net=admin_net))
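+        # Enable DHCP on the untagged VLAN (vid 0) of the admin fabric, with
+        # cfg01 acting as the primary rack controller.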
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd="maas mirantis vlan update "
+            "$(maas mirantis subnets read | jq '.[] | select(.name==\"{net}\") | .vlan.fabric_id') " # noqa
+            "0 dhcp_on=True primary_rack='cfg01'".format(net=admin_net))
+
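+        # Derive root's public key from its private key and register it in
+        # MAAS so it is injected into the nodes that MAAS deploys.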
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='maas mirantis sshkeys create '
+                'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
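+        # Enlist the slave nodes into MAAS via the 'maas.machines' Salt state
+        # and log the state results.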
+        r, f = day1_cfg_config.salt.enforce_state('cfg01*', 'maas.machines')
+        LOG.info(r)
+        LOG.info(f)
+
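+        # Poll MAAS for up to 30 minutes until every slave node reaches the
+        # 'Ready' status after commissioning.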
+        # show_step(8)
+        nodes_amount = len(hardware.slave_nodes)
+        cmd = """   timeout 1800s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq {amount} ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done '   """.format(amount=nodes_amount)  # noqa
+        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-key')
+        # show_step(9)
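+        # Deploy the machines through MAAS and wait until all of them reach
+        # the 'Deployed' status.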
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='salt-call state.sls maas.machines.deploy')
+        # show_step(10)
+        underlay.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd='salt-call state.sls maas.machines.wait_for_deployed')
+        underlay.check_call(node_name=cfg_node, verbose=verbose,
+                            cmd='salt-key')
+
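+        # Authorize the test SSH key and cfg01's own key for the 'root' and
+        # 'ubuntu' users on all minions.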
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key root '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+        underlay.check_call(
+            node_name=cfg_node,
+            verbose=verbose,
+            cmd='salt "*" ssh.set_auth_key ubuntu '
+                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
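+        # Read the Salt API URL from pillar and start the 'deploy_openstack'
+        # Jenkins pipeline, waiting up to two hours for it to finish.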
+        result = \
+            day1_cfg_config.salt.get_pillar(cfg_node,
+                                            '_param:jenkins_salt_api_url')
+        result = result[0].get(cfg_node)
+
+        jenkins = JenkinsClient(
+            host='http://{host}:8081'.format(
+                host=day1_cfg_config.config.salt.salt_master_host),
+            username='admin',
+            password='r00tme')
+        params = jenkins.make_defults_params('deploy_openstack')
+        params['SALT_MASTER_URL'] = result
+        params['STACK_INSTALL'] = "core,openstack,ovs"
+        build = jenkins.run_build('deploy_openstack', params)
+
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 2)
+
+        assert \
+            jenkins.build_info(
+                name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
+            "Deploy openstack was failed"
+
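+        # Optionally run Tempest against the deployed cloud and download the
+        # report.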
+        openstack = managers.openstack_manager.OpenstackManager(
+            day1_cfg_config.config, underlay, hardware,
+            day1_cfg_config.salt)
+
+        if settings.RUN_TEMPEST:
+            openstack.run_tempest(pattern=settings.PATTERN)
+            openstack.download_tempest_report()
+
+        LOG.info("*************** DONE **************")
+
     def test_deploy_day1(self, show_step, config, underlay, hardware,
                          common_services_deployed, salt_deployed):
         """Test for deploying an mcp from day01 images
diff --git a/tcp_tests/tests/system/test_opencontrail.py b/tcp_tests/tests/system/test_opencontrail.py
index 39d3d59..91d7789 100644
--- a/tcp_tests/tests/system/test_opencontrail.py
+++ b/tcp_tests/tests/system/test_opencontrail.py
@@ -47,9 +47,61 @@
             openstack_deployed.download_tempest_report(stored_node='ctl01')
         LOG.info("*************** DONE **************")
 
-        # opencontrail.prepare_tests(
-        #     config.opencontrail.opencontrail_prepare_tests_steps_path)
+    @pytest.mark.fail_snapshot
+    @pytest.mark.with_rally(rally_node="ctl01.")
+    def test_opencontrail_maas(self, config, underlay, salt_actions,
+                               openstack_deployed, show_step, sl_deployed):
+        """Runner for Juniper contrail-tests
 
-        # opencontrail.run_tests(
-        #     tags=config.opencontrail.opencontrail_tags,
-        #     features=config.opencontrail.opencontrail_features)
+        Scenario:
+            1. Prepare salt on hosts
+            2. Set up controller nodes
+            3. Set up compute nodes
+            4. Run tempest
+            5. Exporting results
+            6. Check docker services
+            7. Run SL tests
+            8. Download sl tests report
+        """
+        openstack_deployed._salt.local(
+            tgt='*', fun='cmd.run',
+            args='service ntp stop; ntpd -gq; service ntp start')
+
+        if settings.RUN_TEMPEST:
+            show_step(4)
+            openstack_deployed.run_tempest(target='ctl01',
+                                           pattern=settings.PATTERN)
+            openstack_deployed.download_tempest_report(stored_node='ctl01')
+
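+        # Docker services that are expected to be running in the Stacklight
+        # swarm on the monitoring nodes.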
+        expected_service_list = ['monitoring_server',
+                                 'monitoring_remote_agent',
+                                 'dashboard_grafana',
+                                 'monitoring_alertmanager',
+                                 'monitoring_remote_collector',
+                                 'monitoring_pushgateway']
+        mon_nodes = sl_deployed.get_monitoring_nodes()
+        LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+        prometheus_relay_enabled = salt_actions.get_pillar(
+            tgt=mon_nodes[0],
+            pillar="prometheus:relay:enabled")[0]
+        if not prometheus_relay_enabled:
+            # InfluxDB is used if prometheus relay service is not installed
+            expected_service_list.append('monitoring_remote_storage_adapter')
+        show_step(6)
+        sl_deployed.check_docker_services(mon_nodes, expected_service_list)
+        # Run SL component tests
+        if settings.RUN_SL_TESTS:
+            show_step(7)
+            sl_deployed.run_sl_functional_tests(
+                'cfg01',
+                '/root/stacklight-pytest/stacklight_tests/',
+                'tests/prometheus',
+                'test_alerts.py')
+            show_step(8)
+            # Download report
+            sl_deployed.download_sl_test_report(
+                'cfg01',
+                '/root/stacklight-pytest/stacklight_tests/report.xml')
+
+        LOG.info("*************** DONE **************")