Add Salt API client
diff --git a/tcp_tests/fixtures/common_services_fixtures.py b/tcp_tests/fixtures/common_services_fixtures.py
index 21c0e5d..f629dd2 100644
--- a/tcp_tests/fixtures/common_services_fixtures.py
+++ b/tcp_tests/fixtures/common_services_fixtures.py
@@ -26,14 +26,15 @@
 
 
 @pytest.fixture(scope='function')
-def common_services_actions(config, underlay):
+def common_services_actions(config, underlay, salt_actions):
     """Fixture that provides various actions for CommonServices
 
     :param config: fixture provides oslo.config
     :param underlay: fixture provides underlay manager
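+    :param salt_actions: fixture provides SaltManager instance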
     :rtype: CommonServicesManager
     """
-    return common_services_manager.CommonServicesManager(config, underlay)
+    return common_services_manager.CommonServicesManager(config, underlay,
+                                                         salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.common_services_deployed)
diff --git a/tcp_tests/fixtures/openstack_fixtures.py b/tcp_tests/fixtures/openstack_fixtures.py
index 6ec700a..c4ffa35 100644
--- a/tcp_tests/fixtures/openstack_fixtures.py
+++ b/tcp_tests/fixtures/openstack_fixtures.py
@@ -26,7 +26,7 @@
 
 
 @pytest.fixture(scope='function')
-def openstack_actions(config, underlay):
+def openstack_actions(config, underlay, salt_actions):
     """Fixture that provides various actions for K8S
 
     :param config: fixture provides oslo.config
@@ -35,7 +35,7 @@
 
     For use in tests or fixtures to deploy a custom OpenStack
     """
-    return openstack_manager.OpenstackManager(config, underlay)
+    return openstack_manager.OpenstackManager(config, underlay, salt_actions)
 
 
 @pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
@@ -83,3 +83,50 @@
         pass
 
     return openstack_actions
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
+@pytest.fixture(scope='function')
+def deploy_openstack(revert_snapshot, request, config,
+                     hardware, underlay, common_services_deployed,
+                     openstack_actions):
+    """Fixture to get or install OpenStack services on environment
+
+    :param revert_snapshot: fixture that reverts snapshot that is specified
+                            in test with @pytest.mark.revert_snapshot(<name>)
+    :param request: fixture provides pytest data
+    :param config: fixture provides oslo.config
+    :param hardware: fixture provides environment manager
+    :param underlay: fixture provides underlay manager
+    :param openstack_actions: fixture provides OpenstackManager instance
+    :rtype: OpenstackManager
+
+    If config.openstack.openstack_installed is not set, this fixture assumes
+    that the openstack services were not installed, and does the following:
+    - install openstack services
+    - make snapshot with name 'openstack_deployed'
+    - return OpenstackManager instance
+
+    If config.openstack.openstack_installed is set, this fixture assumes that
+    the openstack services were already installed, and does the following:
+    - return OpenstackManager instance
+
+    If you want to revert 'openstack_deployed' snapshot, please use mark:
+    @pytest.mark.revert_snapshot("openstack_deployed")
+    """
+    # Deploy OpenStack services
+    if not config.openstack.openstack_installed:
+        steps_path = config.openstack_deploy.openstack_steps_path
+        commands = utils.read_template(steps_path)
+        openstack_actions.install(commands)
+        hardware.create_snapshot(ext.SNAPSHOT.openstack_deployed)
+
+    else:
+        # 1. hardware environment created and powered on
+        # 2. config.underlay.ssh contains SSH access to provisioned nodes
+        #    (can be passed from external config with TESTS_CONFIGS variable)
+        # 3. config.tcp.* options contain access credentials to the already
+        #    installed TCP API endpoint
+        pass
+
+    return openstack_actions
diff --git a/tcp_tests/helpers/log_helpers.py b/tcp_tests/helpers/log_helpers.py
new file mode 100644
index 0000000..f448ebd
--- /dev/null
+++ b/tcp_tests/helpers/log_helpers.py
@@ -0,0 +1,88 @@
+import six
+
+
+def _simple(item):
+    """Check for nested iterations: True, if not"""
+    return not isinstance(item, (list, set, tuple, dict))
+
+
+_formatters = {
+    'simple': "{spc:<{indent}}{val!r}".format,
+    'text': "{spc:<{indent}}{prefix}'''{string}'''".format,
+    'dict': "\n{spc:<{indent}}{key!r:{size}}: {val},".format,
+}
+
+
+def pretty_repr(src, indent=0, no_indent_start=False, max_indent=20):
+    """Make human readable repr of object
+    :param src: object to process
+    :type src: object
+    :param indent: start indentation; each nested level adds 4
+    :type indent: int
+    :param no_indent_start: do not indent open bracket and simple parameters
+    :type no_indent_start: bool
+    :param max_indent: maximal indent before classic repr() call
+    :type max_indent: int
+    :return: formatted string
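+
+    Example (illustrative):
+        print(pretty_repr({'cmd': 'ls', 'retry': {'count': 3}}))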
+    """
+    if _simple(src) or indent >= max_indent:
+        indent = 0 if no_indent_start else indent
+        if isinstance(src, (six.binary_type, six.text_type)):
+            if isinstance(src, six.binary_type):
+                string = src.decode(
+                    encoding='utf-8',
+                    errors='backslashreplace'
+                )
+                prefix = 'b'
+            else:
+                string = src
+                prefix = 'u'
+            return _formatters['text'](
+                spc='',
+                indent=indent,
+                prefix=prefix,
+                string=string
+            )
+        return _formatters['simple'](
+            spc='',
+            indent=indent,
+            val=src
+        )
+    if isinstance(src, dict):
+        prefix, suffix = '{', '}'
+        result = ''
+        max_len = max([len(repr(key)) for key in src]) if src else 0
+        for key, val in src.items():
+            result += _formatters['dict'](
+                spc='',
+                indent=indent + 4,
+                size=max_len,
+                key=key,
+                val=pretty_repr(val, indent + 8, no_indent_start=True)
+            )
+        return (
+            '\n{start:>{indent}}'.format(
+                start=prefix,
+                indent=indent + 1
+            ) +
+            result +
+            '\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)
+        )
+    if isinstance(src, list):
+        prefix, suffix = '[', ']'
+    elif isinstance(src, tuple):
+        prefix, suffix = '(', ')'
+    else:
+        prefix, suffix = '{', '}'
+    result = ''
+    for elem in src:
+        if _simple(elem):
+            result += '\n'
+        result += pretty_repr(elem, indent + 4) + ','
+    return (
+        '\n{start:>{indent}}'.format(
+            start=prefix,
+            indent=indent + 1) +
+        result +
+        '\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)
+    )
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/common_services_manager.py
index a91eb46..dee792c 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/common_services_manager.py
@@ -12,18 +12,22 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-class CommonServicesManager(object):
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+
+
+class CommonServicesManager(ExecuteCommandsMixin):
     """docstring for CommonServicesManager"""
 
-    __config = None
-    __underlay = None
+    _config = None
+    _underlay = None
 
-    def __init__(self, config, underlay):
-        self.__config = config
-        self.__underlay = underlay
+    def __init__(self, config, underlay, salt=None):
+        self._config = config
+        self._underlay = underlay
+        self._salt = salt
         super(CommonServicesManager, self).__init__()
 
     def install(self, commands):
-        self.__underlay.execute_commands(commands,
-                                         label='Install common services')
-        self.__config.common_services.common_services_installed = True
+        self.execute_commands(commands,
+                              label='Install common services')
+        self._config.common_services.common_services_installed = True
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index e25faef..0874b49 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -33,7 +33,7 @@
 class EnvironmentManager(object):
     """Class-helper for creating VMs via devops environments"""
 
-    __config = None
+    _config = None
 
     def __init__(self, config=None):
         """Initializing class instance and create the environment
@@ -45,7 +45,7 @@
         """
         self.__devops_config = env_config.EnvironmentConfig()
         self._env = None
-        self.__config = config
+        self._config = config
 
         if config.hardware.conf_path is not None:
             self._devops_config.load_template(config.hardware.conf_path)
@@ -173,9 +173,9 @@
         msg = "[ Create snapshot '{0}' ] {1}".format(name, description or '')
         LOG.info("\n\n{0}\n{1}".format(msg, '*' * len(msg)))
 
-        self.__config.hardware.current_snapshot = name
+        self._config.hardware.current_snapshot = name
         LOG.info("Set current snapshot in config to '{0}'".format(
-            self.__config.hardware.current_snapshot))
+            self._config.hardware.current_snapshot))
         if self._env is not None:
             LOG.info('trying to suspend ....')
             self._env.suspend()
@@ -185,10 +185,11 @@
             self._env.resume()
         else:
             raise exceptions.EnvironmentIsNotSet()
-        settings_oslo.save_config(self.__config, name, self._env.name)
+        settings_oslo.save_config(self._config, name, self._env.name)
 
         if settings.VIRTUAL_ENV:
-            venv_msg = "source {0}/bin/activate;\n".format(settings.VIRTUAL_ENV)
+            venv_msg = "source {0}/bin/activate;\n".format(
+                settings.VIRTUAL_ENV)
         else:
             venv_msg = ""
         LOG.info("To revert the snapshot:\n\n"
@@ -204,7 +205,7 @@
                          snapshot_name=name,
                          login=settings.SSH_NODE_CREDENTIALS['login'],
                          password=settings.SSH_NODE_CREDENTIALS['password'],
-                         salt_master_host=self.__config.salt.salt_master_host))
+                         salt_master_host=self._config.salt.salt_master_host))
 
     def _get_snapshot_config_name(self, snapshot_name):
         """Get config name for the environment"""
@@ -235,13 +236,13 @@
 
         try:
             test_config_path = self._get_snapshot_config_name(name)
-            settings_oslo.reload_snapshot_config(self.__config,
+            settings_oslo.reload_snapshot_config(self._config,
                                                  test_config_path)
         except cfg.ConfigFilesNotFoundError as conf_err:
             LOG.error("Config file(s) {0} not found!".format(
                 conf_err.config_files))
 
-        self.__config.hardware.current_snapshot = name
+        self._config.hardware.current_snapshot = name
 
     def _create_environment(self):
         """Create environment and start VMs.
@@ -373,12 +374,12 @@
 
     def set_dns_config(self):
         # Set local nameserver to use by default
-        if not self.__config.underlay.nameservers:
-            self.__config.underlay.nameservers = [self.nameserver]
-        if not self.__config.underlay.upstream_dns_servers:
-            self.__config.underlay.upstream_dns_servers = [self.nameserver]
+        if not self._config.underlay.nameservers:
+            self._config.underlay.nameservers = [self.nameserver]
+        if not self._config.underlay.upstream_dns_servers:
+            self._config.underlay.upstream_dns_servers = [self.nameserver]
 
     def set_address_pools_config(self):
         """Store address pools CIDRs in config object"""
         for ap in self._env.get_address_pools():
-            self.__config.underlay.address_pools[ap.name] = ap.net
+            self._config.underlay.address_pools[ap.name] = ap.net
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
index b9ab8e1..702d723 100644
--- a/tcp_tests/managers/envmanager_empty.py
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -18,7 +18,7 @@
 class EnvironmentManagerEmpty(object):
     """Class-helper for creating VMs via devops environments"""
 
-    __config = None
+    _config = None
 
     def __init__(self, config=None):
         """Initializing class instance and create the environment
@@ -28,12 +28,12 @@
         :param config.hardware.current_snapshot: name of the snapshot that
                                                 describes environment status.
         """
-        self.__config = config
+        self._config = config
 
     def lvm_storages(self):
         """Returns data of lvm_storages on nodes in environment
 
-        It's expected that data of self.__config.lvm_storages will be
+        It's expected that data of self._config.lvm_storages will be
         like this:
             {
                 "node1": {
@@ -48,7 +48,7 @@
             }
         :rtype: dict
         """
-        return self.__config.underlay.lvm
+        return self._config.underlay.lvm
 
     def get_ssh_data(self, roles=None):
         raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
@@ -60,24 +60,24 @@
         - Store the state of the environment <name> to the 'config' object
         - Save 'config' object to a file 'config_<name>.ini'
         """
-        self.__config.hardware.current_snapshot = name
-        settings_oslo.save_config(self.__config, name)
+        self._config.hardware.current_snapshot = name
+        settings_oslo.save_config(self._config, name)
 
     def revert_snapshot(self, name):
         """Check the current state <name> of the environment
 
         - Check that the <name> matches the current state of the environment
-          that is stored in the 'self.__config.hardware.current_snapshot'
+          that is stored in the 'self._config.hardware.current_snapshot'
         - Try to reload 'config' object from a file 'config_<name>.ini'
           If the file not found, then pass with defaults.
         - Set <name> as the current state of the environment after reload
 
         :param name: string
         """
-        if self.__config.hardware.current_snapshot != name:
+        if self._config.hardware.current_snapshot != name:
             raise Exception(
                 "EnvironmentManagerEmpty cannot revert nodes from {} to {}"
-                .format(self.__config.hardware.current_snapshot, name))
+                .format(self._config.hardware.current_snapshot, name))
 
     def start(self):
         """Start environment"""
@@ -96,10 +96,10 @@
         pass
 
     def has_snapshot(self, name):
-        return self.__config.hardware.current_snapshot == name
+        return self._config.hardware.current_snapshot == name
 
     def has_snapshot_config(self, name):
-        return self.__config.hardware.current_snapshot == name
+        return self._config.hardware.current_snapshot == name
 
     def delete_environment(self):
         """Delete environment"""
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
new file mode 100644
index 0000000..76f4bc9
--- /dev/null
+++ b/tcp_tests/managers/execute_commands.py
@@ -0,0 +1,191 @@
+
+import time
+
+from tcp_tests import logger
+from tcp_tests.helpers.log_helpers import pretty_repr
+
+LOG = logger.logger
+
+
+class ExecuteCommandsMixin(object):
+    """docstring for ExecuteCommands"""
+
+    def ensure_running_service(self, service_name, host, check_cmd,
+                               state_running='start/running'):
+        """Check if the service_name running or try to restart it
+
+        :param service_name: name of the service that will be checked
+        :param node_name: node on which the service will be checked
+        :param check_cmd: shell command to ensure that the service is running
+        :param state_running: string for check the service state
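+
+        Example (hypothetical host address):
+            self.ensure_running_service(
+                'salt-master', '172.16.10.100',
+                'salt-call pillar.items',
+                state_running='active (running)')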
+        """
+        cmd = "service {0} status | grep -q '{1}'".format(
+            service_name, state_running)
+        with self._underlay.remote(host=host) as remote:
+            result = remote.execute(cmd)
+            if result.exit_code != 0:
+                LOG.info("{0} is not in running state on the node {1},"
+                         " trying to start".format(service_name, host))
+                cmd = ("service {0} stop;"
+                       " sleep 3; killall -9 {0};"
+                       "service {0} start; sleep 5;"
+                       .format(service_name))
+                remote.execute(cmd)
+
+                remote.execute(check_cmd)
+                remote.execute(check_cmd)
+
+    def execute_commands(self, commands, label="Command"):
+        """Execute a sequence of commands
+
+        The main purpose is to implement workarounds for salt formulas like:
+        - exit_code == 0 when there are actual failures
+        - salt_master and/or salt_minion stop working after executing a formula
+        - a formula fails on the first run, but completes on subsequent runs
+
+        :param label: label of the current sequence of commands, for logging
+        :param commands: list of dicts with the following data:
+        commands = [
+            ...
+            {
+                # Required:
+                'cmd': 'shell command(s) to run',
+                'node_name': 'name of the node to run the command(s)',
+                # Optional:
+                'description': 'string with a readable command description',
+                'retry': {
+                    'count': int,  # How many times to run the command
+                                   # until it succeeds
+                    'delay': int,  # Delay between tries, in seconds
+                },
+                'skip_fail': bool  # If True - continue with the next step
+                                   # without failing, even if the retry
+                                   # count is reached.
+                                   # If False - raise an exception (default)
+            },
+            ...
+        ]
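+
+        Alternatively, a step may describe a Salt API call instead of a
+        shell command (handled by self.command2(), mapped through
+        SaltManager._map):
+        commands = [
+            ...
+            {
+                # Required:
+                'do': 'enforceState',  # one of: enforceState, enforceStates,
+                                       # runState, runStates
+                'target': 'I@keepalived:cluster',  # salt compound target
+                'state': 'keepalived',  # or 'states': [...] (exactly one)
+                # Optional:
+                'args': [],      # positional arguments for the call
+                'kwargs': {},    # keyword arguments for the call
+                'description': 'string with a readable command description',
+                'retry': {'count': int, 'delay': int},
+                'skip_fail': bool,
+            },
+            ...
+        ]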
+        """
+        for n, step in enumerate(commands):
+            # Required fields
+            cmd = step.get('cmd')
+            do = step.get('do')
+            # node_name = step.get('node_name')
+            # Optional fields
+            description = step.get('description', cmd)
+            # retry = step.get('retry', {'count': 1, 'delay': 1})
+            # retry_count = retry.get('count', 1)
+            # retry_delay = retry.get('delay', 1)
+            # skip_fail = step.get('skip_fail', False)
+
+            msg = "[ {0} #{1} ] {2}".format(label, n + 1, description)
+            LOG.info("\n\n{0}\n{1}".format(msg, '=' * len(msg)))
+
+            if cmd:
+                self.execute_command(step)
+            elif do:
+                self.command2(step)
+
+    def execute_command(self, step):
+        # Required fields
+        cmd = step.get('cmd')
+        node_name = step.get('node_name')
+        # Optional fields
+        description = step.get('description', cmd)
+        retry = step.get('retry', {'count': 1, 'delay': 1})
+        retry_count = retry.get('count', 1)
+        retry_delay = retry.get('delay', 1)
+        skip_fail = step.get('skip_fail', False)
+
+        with self._underlay.remote(node_name=node_name) as remote:
+
+            for x in range(retry_count, 0, -1):
+                time.sleep(3)
+                result = remote.execute(cmd, verbose=True)
+
+                # Workaround of exit code 0 from salt in case of failures
+                failed = 0
+                for s in result['stdout']:
+                    if s.startswith("Failed:"):
+                        failed += int(s.split("Failed:")[1])
+
+                if result.exit_code != 0:
+                    time.sleep(retry_delay)
+                    LOG.info(
+                        " === RETRY ({0}/{1}) ========================="
+                        .format(x - 1, retry_count))
+                elif failed != 0:
+                    LOG.error(
+                        " === SALT returned exit code = 0 while "
+                        "there are failed modules! ===")
+                    LOG.info(
+                        " === RETRY ({0}/{1}) ======================="
+                        .format(x - 1, retry_count))
+                else:
+                    if self._config.salt.salt_master_host != '0.0.0.0':
+                        # Workarounds for crashed services
+                        self.ensure_running_service(
+                            "salt-master",
+                            self._config.salt.salt_master_host,
+                            "salt-call pillar.items",
+                            'active (running)')  # Hardcoded for now
+                        self.ensure_running_service(
+                            "salt-minion",
+                            self._config.salt.salt_master_host,
+                            "salt 'cfg01*' pillar.items",
+                            "active (running)")  # Hardcoded for now
+                    # Success: stop retrying this step
+                    break
+
+                if x == 1 and skip_fail is False:
+                    # In the last retry iteration, raise an exception
+                    raise Exception("Step '{0}' failed"
+                                    .format(description))
+
+    def command2(self, step):
+        # Required fields
+        do = step['do']
+        target = step['target']
+        state = step.get('state')
+        states = step.get('states')
+        # Optional fields
+        args = step.get('args')
+        kwargs = step.get('kwargs')
+        description = step.get('description', do)
+        retry = step.get('retry', {'count': 1, 'delay': 1})
+        retry_count = retry.get('count', 1)
+        retry_delay = retry.get('delay', 1)
+        skip_fail = step.get('skip_fail', False)
+
+        if not bool(state) ^ bool(states):
+            raise ValueError(
+                "Exactly one of 'state' or 'states' must be set in the step")
+
+        for x in range(retry_count, 0, -1):
+            time.sleep(3)
+
+            method = getattr(self._salt, self._salt._map[do])
+            command_ret = method(tgt=target, state=state or states,
+                                 args=args, kwargs=kwargs)
+            command_ret = command_ret if \
+                isinstance(command_ret, list) else [command_ret]
+            results = [(r['return'][0], f) for r, f in command_ret]
+
+            # FIXME: Change to debug level
+            LOG.info(" === States output =======================\n"
+                     "{}\n"
+                     " =========================================".format(
+                         pretty_repr([r for r, f in results])))
+
+            all_fails = [f for r, f in results if f]
+            if all_fails:
+                LOG.error("States finished with failures.\n{}".format(
+                    all_fails))
+                time.sleep(retry_delay)
+                LOG.info(" === RETRY ({0}/{1}) ========================="
+                         .format(x - 1, retry_count))
+            else:
+                break
+
+            if x == 1 and skip_fail is False:
+                # In the last retry iteration, raise an exception
+                raise Exception("Step '{0}' failed"
+                                .format(description))
diff --git a/tcp_tests/managers/opencontrail_manager.py b/tcp_tests/managers/opencontrail_manager.py
index 3c6621c..108794d 100644
--- a/tcp_tests/managers/opencontrail_manager.py
+++ b/tcp_tests/managers/opencontrail_manager.py
@@ -12,31 +12,33 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-class OpenContrailManager(object):
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+
+
+class OpenContrailManager(ExecuteCommandsMixin):
     """docstring for OpenstackManager"""
 
-    __config = None
-    __underlay = None
-    __openstack_actions = None
+    _config = None
+    _underlay = None
+    _openstack_actions = None
 
     def __init__(self, config, underlay, openstack_deployed):
-        self.__config = config
-        self.__underlay = underlay
-        self.__openstack_actions = openstack_deployed
+        self._config = config
+        self._underlay = underlay
+        self._openstack_actions = openstack_deployed
         super(OpenContrailManager, self).__init__()
 
-    def prepare_tests(commands):
-        self.__underlay.execute_commands(commands=commands,
-                                         label="Prepare Juniper contrail-test")
+    def prepare_tests(self, commands):
+        self.execute_commands(commands=commands,
+                              label="Prepare Juniper contrail-test")
 
-    def run_tests(tags='', features=''):
+    def run_tests(self, tags='', features=''):
         cmd = "salt 'ctl01*' grains.get fqdn|tail -n1"
-        result = self.__underlay.check_call(
-            cmd, host=self.__config.salt.salt_master_host)
+        result = self._underlay.check_call(
+            cmd, host=self._config.salt.salt_master_host)
 
         ctl01_name = result['stdout'].strip()
 
-
         cmd = '. /etc/contrail/openstackrc; cd /opt/contrail-test; ./run_tests.sh'
         if tags != '':
             cmd += ' --tags ' + tags
@@ -44,6 +46,6 @@
         if features != '':
             cmd += ' --features ' + features
 
-        self.__underlay.check_call(
+        self._underlay.check_call(
             cmd,
             node_name=ctl01_name)
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index b8725bd..55af1e2 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -12,18 +12,22 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-class OpenstackManager(object):
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+
+
+class OpenstackManager(ExecuteCommandsMixin):
     """docstring for OpenstackManager"""
 
-    __config = None
-    __underlay = None
+    _config = None
+    _underlay = None
 
-    def __init__(self, config, underlay):
-        self.__config = config
-        self.__underlay = underlay
+    def __init__(self, config, underlay, salt):
+        self._config = config
+        self._underlay = underlay
+        self._salt = salt
         super(OpenstackManager, self).__init__()
 
     def install(self, commands):
-        self.__underlay.execute_commands(commands=commands,
-                                         label="Install OpenStack services")
-        self.__config.openstack.openstack_installed = True
+        self.execute_commands(commands,
+                              label='Install OpenStack services')
+        self._config.openstack.openstack_installed = True
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 1a89616..b65c74f 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -11,26 +11,165 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-class SaltManager(object):
+from collections import defaultdict
+
+from datetime import datetime
+from pepper.libpepper import Pepper
+from tcp_tests import settings
+from tcp_tests import logger
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+
+LOG = logger.logger
+
+
+class SaltManager(ExecuteCommandsMixin):
     """docstring for SaltManager"""
 
-    __config = None
-    __underlay = None
+    _config = None
+    _underlay = None
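+    # Maps 'do' values from YAML steps to SaltManager method names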
+    _map = {
+        'enforceState': 'enforce_state',
+        'enforceStates': 'enforce_states',
+        'runState': 'run_state',
+        'runStates': 'run_states',
+    }
 
-    def __init__(self, config, underlay):
-        self.__config = config
-        self.__underlay = underlay
-
+    def __init__(self, config, underlay, host=None, port='6969'):
+        self._config = config
+        self._underlay = underlay
+        self._port = port
+        self._host = host
+        self._api = None
+        self._user = settings.SALT_USER
+        self._password = settings.SALT_PASSWORD
+        self._salt = self
 
         super(SaltManager, self).__init__()
 
     def install(self, commands):
-        if self.__config.salt.salt_master_host == '0.0.0.0':
-            # Temporary workaround. Underlay should be extended with roles
-            salt_nodes = self.__underlay.node_names()
-            self.__config.salt.salt_master_host = \
-                self.__underlay.host_by_node_name(salt_nodes[0])
+        if commands[0].get('do'):
+            self.install2(commands)
+        else:
+            self.install1(commands)
 
-        self.__underlay.execute_commands(commands=commands,
-                                         label="Install and configure salt")
+    def install1(self, commands):
+        if self._config.salt.salt_master_host == '0.0.0.0':
+            # Temporary workaround. Underlay should be extended with roles
+            salt_nodes = self._underlay.node_names()
+            self._config.salt.salt_master_host = \
+                self._underlay.host_by_node_name(salt_nodes[0])
+
+        # self._underlay.execute_commands(commands=commands,
+        #                                  label="Install and configure salt")
+        self.execute_commands(commands=commands,
+                              label="Install and configure salt")
+
+    def install2(self, commands):
+        if self._config.salt.salt_master_host == '0.0.0.0':
+            # Temporary workaround. Underlay should be extended with roles
+            salt_nodes = self._underlay.node_names()
+            self._config.salt.salt_master_host = \
+                self._underlay.host_by_node_name(salt_nodes[0])
+
+        # self.run_commands(commands=commands,
+        #                   label="Install and configure salt")
+        self.execute_commands(commands=commands,
+                              label="Install and configure salt")
+
+    @property
+    def port(self):
+        return self._port
+
+    @property
+    def host(self):
+        if self._host:
+            return self._host
+        elif self._config.salt.salt_master_host == '0.0.0.0':
+            # Temporary workaround. Underlay should be extended with roles
+            salt_nodes = self._underlay.node_names()
+            self._config.salt.salt_master_host = \
+                self._underlay.host_by_node_name(salt_nodes[0])
+
+        return self._config.salt.salt_master_host
+
+    @property
+    def api(self):
+        def login():
+            LOG.info("Authentication in Salt API")
+            self._api.login(
+                username=self._user,
+                password=self._password,
+                eauth='pam')
+            return datetime.now()
+
+        if self._api:
+            if (datetime.now() - self.__session_start).seconds < 5 * 60:
+                return self._api
+            else:
+                # FIXME: Change to debug
+                LOG.info("Session has expired, logging in again")
+                self.__session_start = login()
+                return self._api
+
+        LOG.info("Connect to Salt API")
+        url = "http://{host}:{port}".format(
+            host=self.host, port=self.port)
+        self._api = Pepper(url)
+        self.__session_start = login()
+        return self._api
+
+    def local(self, tgt, fun, args=None, kwargs=None):
+        return self.api.local(tgt, fun, args, kwargs, expr_form='compound')
+
+    def local_async(self, tgt, fun, args=None, kwargs=None):
+        return self.api.local_async(tgt, fun, args, kwargs)
+
+    def lookup_result(self, jid):
+        return self.api.lookup_jid(jid)
+
+    def check_result(self, r):
+        if len(r.get('return', [])) == 0:
+            raise LookupError("Result is empty or absent")
+
+        result = r['return'][0]
+        LOG.info("Job has result for %s nodes", result.keys())
+        fails = defaultdict(list)
+        for h in result:
+            host_result = result[h]
+            LOG.info("On %s executed:", h)
+            if isinstance(host_result, list):
+                fails[h].append(host_result)
+                continue
+            for t in host_result:
+                task = host_result[t]
+                if task['result'] is False:
+                    fails[h].append(task)
+                    LOG.error("%s - %s", t, task['result'])
+                else:
+                    LOG.info("%s - %s", t, task['result'])
+
+        return fails if fails else None
+
+    def enforce_state(self, tgt, state, args=None, kwargs=None):
+        r = self.local(tgt=tgt, fun='state.sls', args=state)
+        f = self.check_result(r)
+        return r, f
+
+    def enforce_states(self, tgt, state, args=None, kwargs=None):
+        rets = []
+        for s in state:
+            r = self.enforce_state(tgt=tgt, state=s)
+            rets.append(r)
+        return rets
+
+    def run_state(self, tgt, state, args=None, kwargs=None):
+        return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs), None
+
+    def run_states(self, tgt, state, args=None, kwargs=None):
+        rets = []
+        for s in state:
+            r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs)
+            rets.append(r)
+        return rets
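+
+# Usage sketch (hypothetical; assumes a SaltManager connected to a live
+# salt master with salt-api running):
+#
+#   ret, fails = salt_actions.enforce_state(tgt='I@keepalived:cluster',
+#                                           state='keepalived')
+#   if fails:
+#       raise Exception("State 'keepalived' failed: {}".format(fails))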
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index f4c5df3..18095c6 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -13,7 +13,6 @@
 #    under the License.
 
 import random
-import time
 
 from devops.helpers import helpers
 from devops.helpers import ssh_client
@@ -375,116 +374,3 @@
         }
         template = utils.render_template(file_path, options=options)
         return yaml.load(template)
-
-    def ensure_running_service(self, service_name, host, check_cmd,
-                               state_running='start/running'):
-        """Check if the service_name running or try to restart it
-
-        :param service_name: name of the service that will be checked
-        :param node_name: node on which the service will be checked
-        :param check_cmd: shell command to ensure that the service is running
-        :param state_running: string for check the service state
-        """
-        cmd = "service {0} status | grep -q '{1}'".format(
-            service_name, state_running)
-        with self.remote(host=host) as remote:
-            result = remote.execute(cmd)
-            if result.exit_code != 0:
-                LOG.info("{0} is not in running state on the node {1},"
-                         " trying to start".format(service_name, host))
-                cmd = ("service {0} stop;"
-                       " sleep 3; killall -9 {0};"
-                       "service {0} start; sleep 5;"
-                       .format(service_name))
-                remote.execute(cmd)
-
-                remote.execute(check_cmd)
-                remote.execute(check_cmd)
-
-    def execute_commands(self, commands, label="Command"):
-        """Execute a sequence of commands
-
-        Main propose is to implement workarounds for salt formulas like:
-        - exit_code == 0 when there are actual failures
-        - salt_master and/or salt_minion stop working after executing a formula
-        - a formula fails at first run, but completes at next runs
-
-        :param label: label of the current sequence of the commands, for log
-        :param commands: list of dicts with the following data:
-        commands = [
-            ...
-            {
-                # Required:
-                'cmd': 'shell command(s) to run',
-                'node_name': 'name of the node to run the command(s)',
-                # Optional:
-                'description': 'string with a readable command description',
-                'retry': {
-                    'count': int,  # How many times should be run the command
-                                   # until success
-                    'delay': int,  # Delay between tries in seconds
-                },
-                'skip_fail': bool  # If True - continue with the next step
-                                   # without failure even if count number
-                                   # is reached.
-                                   # If False - rise an exception (default)
-            },
-            ...
-        ]
-        """
-        for n, step in enumerate(commands):
-            # Required fields
-            cmd = step.get('cmd')
-            node_name = step.get('node_name')
-            # Optional fields
-            description = step.get('description', cmd)
-            retry = step.get('retry', {'count': 1, 'delay': 1})
-            retry_count = retry.get('count', 1)
-            retry_delay = retry.get('delay', 1)
-            skip_fail = step.get('skip_fail', False)
-
-            msg = "[ {0} #{1} ] {2}".format(label, n+1, description)
-            LOG.info("\n\n{0}\n{1}".format(msg, '=' * len(msg)))
-
-            with self.remote(node_name=node_name) as remote:
-
-                for x in range(retry_count, 0, -1):
-                    time.sleep(3)
-                    result = remote.execute(cmd, verbose=True)
-
-                    # Workaround of exit code 0 from salt in case of failures
-                    failed = 0
-                    for s in result['stdout']:
-                        if s.startswith("Failed:"):
-                            failed += int(s.split("Failed:")[1])
-                        if 'Minion did not return. [No response]' in s:
-                            failed += 1
-
-                    if result.exit_code != 0:
-                        time.sleep(retry_delay)
-                        LOG.info(" === RETRY ({0}/{1}) ========================="
-                                 .format(x-1, retry_count))
-                    elif failed != 0:
-                        LOG.error(" === SALT returned exit code = 0 while "
-                                  "there are failed modules! ===")
-                        LOG.info(" === RETRY ({0}/{1}) ======================="
-                                 .format(x-1, retry_count))
-                    else:
-                        if self.__config.salt.salt_master_host != '0.0.0.0':
-                            # Workarounds for crashed services
-                            self.ensure_running_service(
-                                "salt-master",
-                                self.__config.salt.salt_master_host,
-                                "salt-call pillar.items",
-                                'active (running)') # Hardcoded for now
-                            self.ensure_running_service(
-                                "salt-minion",
-                                self.__config.salt.salt_master_host,
-                                "salt 'cfg01*' pillar.items",
-                                "active (running)") # Hardcoded for now
-                            break
-
-                    if x == 1 and skip_fail == False:
-                        # In the last retry iteration, raise an exception
-                        raise Exception("Step '{0}' failed"
-                                        .format(description))
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index a2e3de5..d1b3ec5 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -12,4 +12,4 @@
 jira
 testrail
 functools32
-
+salt-pepper
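+# salt-pepper provides the Pepper client used by tcp_tests SaltManager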
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index f451eaf..d237755 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -48,3 +48,6 @@
     os.environ.get("IFACE_0", "eth0"),
     os.environ.get("IFACE_1", "eth1"),
 ]
+
+SALT_USER = os.environ.get('SALT_USER', 'salt')
+SALT_PASSWORD = os.environ.get('SALT_PASSWORD', 'hovno12345!')
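+# Placeholder example of overriding the Salt API credentials via the
+# environment (values are illustrative):
+#   export SALT_USER=saltapi
+#   export SALT_PASSWORD=secret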
diff --git a/tcp_tests/templates/common-services/virtual-mcp10-ovs-common-services.yaml b/tcp_tests/templates/common-services/virtual-mcp10-ovs-common-services.yaml
index 1d228ff..a322b67 100644
--- a/tcp_tests/templates/common-services/virtual-mcp10-ovs-common-services.yaml
+++ b/tcp_tests/templates/common-services/virtual-mcp10-ovs-common-services.yaml
@@ -1,104 +1,147 @@
 {% from 'virtual-mcp10-ovs.jinja' import HOSTNAME_CFG01 with context %}
 
-# Install support services
+################### Install OpenStack infra ##########################
 
+# salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
+- description: Install and run GlusterFS
+  do: enforceState
+  target: I@glusterfs:server
+  state: glusterfs.server.service
+
+#     // Install keepaliveds
+#     //runSaltProcessStep(master, 'I@keepalived:cluster', 'state.sls', ['keepalived'], 1)
+#     salt.enforceState(saltMaster, 'I@keepalived:cluster and *01*', 'keepalived', true)
+#     salt.enforceState(saltMaster, 'I@keepalived:cluster', 'keepalived', true)
 - description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+  do: enforceState
+  target: I@keepalived:cluster and *01*
+  state: keepalived
 
-- description: Check the VIP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' cmd.run 'ip a | grep 172.16.10.2' | grep -B1 172.16.10.2
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Re-run installation of keepalived
+  do: enforceState
+  target: I@keepalived:cluster
+  state: keepalived
+
+#  // Check the keepalived VIPs
+#  salt.runSaltProcessStep(saltMaster, 'I@keepalived:cluster', 'cmd.run', ['ip a | grep 172.16.10.2'])
+#  salt.enforceState(saltMaster, 'I@glusterfs:server and *01*', 'glusterfs.server.setup', true)
+#  salt.runSaltProcessStep(saltMaster, 'I@glusterfs:server', 'cmd.run', ['gluster peer status'], null, true)
+#  salt.runSaltProcessStep(saltMaster, 'I@glusterfs:server', 'cmd.run', ['gluster volume status'], null, true)
+
+- description: Show VIPs
+  do: runState
+  target: I@keepalived:cluster
+  state: cmd.run
+  args: ['ip a | grep 172.16.10.2']
+
+- description: Re-run GlusterFS server setup
+  do: enforceState
+  target: I@glusterfs:server and *01*
+  state: glusterfs.server.setup
+
+- description: Show Gluster peer status
+  do: runState
+  target: I@glusterfs:server
+  state: cmd.run
+  args: ['gluster peer status']
+
+- description: Show Gluster volumes status
+  do: runState
+  target: I@glusterfs:server
+  state: cmd.run
+  args: ['gluster volume status']
 
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+  #     // Install rabbitmq
+  #     withEnv(['ASK_ON_ERROR=false']){
+  #         retry(2) {
+  #             salt.enforceState(saltMaster, 'I@rabbitmq:server', 'rabbitmq', true)
+  #         }
+  #     }
+  #     // Check the rabbitmq status
+  #     salt.runSaltProcessStep(saltMaster, 'I@rabbitmq:server', 'cmd.run', ['rabbitmqctl cluster_status'])
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install RabbitMQ
+  do: enforceState
+  target: I@rabbitmq:server
+  state: rabbitmq
+  retry: {count: 2, delay: 5}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show RabbitMQ status
+  do: runState
+  target: I@rabbitmq:server
+  state: cmd.run
+  args: ['rabbitmqctl cluster_status']
+
+  #     // Install galera
+  #     withEnv(['ASK_ON_ERROR=false']){
+  #         retry(2) {
+  #             salt.enforceState(saltMaster, 'I@galera:master', 'galera', true)
+  #         }
+  #     }
+  #     salt.enforceState(saltMaster, 'I@galera:slave', 'galera', true)
+
+  #     // Check galera status
+  #     salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'mysql.status')
+  #     salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'mysql.status')
+
+  #     // // Setup mysql client
+  #     // salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client', true)
 
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Galera (master)
+  do: enforceState
+  target: I@galera:master
+  state: galera
+  retry: {count: 2, delay: 5}
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Galera (slaves)
+  do: enforceState
+  target: I@galera:slave
+  state: galera
 
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show master galera status
+  do: runState
+  target: I@galera:master
+  state: mysql.status
 
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show slave galera status
+  do: runState
+  target: I@galera:slave
+  state: mysql.status
 
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+# - description: Install mysql client
+#   do: enforceState
+#   target: I@mysql:client
+#   state: mysql.client
 
 
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // Install haproxy
+# salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy', true)
+# salt.runSaltProcessStep(saltMaster, 'I@haproxy:proxy', 'service.status', ['haproxy'])
+# salt.runSaltProcessStep(saltMaster, 'I@haproxy:proxy', 'service.restart', ['rsyslog'])
 
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
 
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install HAProxy
+  do: enforceState
+  target: I@haproxy:proxy
+  state: haproxy
 
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show HAProxy service status
+  do: runState
+  target: I@haproxy:proxy
+  state: service.status
+  args: ['haproxy']
+
+- description: Restart rsyslog service
+  do: runState
+  target: I@haproxy:proxy
+  state: service.restart
+  args: ['rsyslog']
+
+# // Install memcached
+# salt.enforceState(saltMaster, 'I@memcached:server', 'memcached', true)
+- description: Install Memcached
+  do: enforceState
+  target: I@memcached:server
+  state: memcached
diff --git a/tcp_tests/templates/common-services/virtual-mcp11-ovs-common-services.yaml b/tcp_tests/templates/common-services/virtual-mcp11-ovs-common-services.yaml
index ccbbfac..6d13c9b 100644
--- a/tcp_tests/templates/common-services/virtual-mcp11-ovs-common-services.yaml
+++ b/tcp_tests/templates/common-services/virtual-mcp11-ovs-common-services.yaml
@@ -1,118 +1,147 @@
 {% from 'underlay/virtual-mcp11-ovs.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+################### Install OpenStack infra ##########################
 
+# salt.enforceState(saltMaster, 'I@glusterfs:server', 'glusterfs.server.service', true)
+- description: Install and run GlusterFS
+  do: enforceState
+  target: I@glusterfs:server
+  state: glusterfs.server.service
+
+#     // Install keepaliveds
+#     //runSaltProcessStep(master, 'I@keepalived:cluster', 'state.sls', ['keepalived'], 1)
+#     salt.enforceState(saltMaster, 'I@keepalived:cluster and *01*', 'keepalived', true)
+#     salt.enforceState(saltMaster, 'I@keepalived:cluster', 'keepalived', true)
 - description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
+  do: enforceState
+  target: I@keepalived:cluster and *01*
+  state: keepalived
 
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Re-run installation of keepalived
+  do: enforceState
+  target: I@keepalived:cluster
+  state: keepalived
+
+#  // Check the keepalived VIPs
+#  salt.runSaltProcessStep(saltMaster, 'I@keepalived:cluster', 'cmd.run', ['ip a | grep 172.16.10.2'])
+#  salt.enforceState(saltMaster, 'I@glusterfs:server and *01*', 'glusterfs.server.setup', true)
+#  salt.runSaltProcessStep(saltMaster, 'I@glusterfs:server', 'cmd.run', ['gluster peer status'], null, true)
+#  salt.runSaltProcessStep(saltMaster, 'I@glusterfs:server', 'cmd.run', ['gluster volume status'], null, true)
+
+- description: Show VIPs
+  do: runState
+  target: I@keepalived:cluster
+  state: cmd.run
+  args: ['ip a | grep 172.16.10.2']
+
+- description: Re-run GlusterFS server setup
+  do: enforceState
+  target: I@glusterfs:server and *01*
+  state: glusterfs.server.setup
+
+- description: Show Gluster peer status
+  do: runState
+  target: I@glusterfs:server
+  state: cmd.run
+  args: ['gluster peer status']
+
+- description: Show Gluster volumes status
+  do: runState
+  target: I@glusterfs:server
+  state: cmd.run
+  args: ['gluster volume status']
 
 
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+  #     // Install rabbitmq
+  #     withEnv(['ASK_ON_ERROR=false']){
+  #         retry(2) {
+  #             salt.enforceState(saltMaster, 'I@rabbitmq:server', 'rabbitmq', true)
+  #         }
+  #     }
+  #     // Check the rabbitmq status
+  #     salt.runSaltProcessStep(saltMaster, 'I@rabbitmq:server', 'cmd.run', ['rabbitmqctl cluster_status'])
 
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install RabbitMQ
+  do: enforceState
+  target: I@rabbitmq:server
+  state: rabbitmq
+  retry: {count: 2, delay: 5}
 
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show RabbitMQ status
+  do: runState
+  target: I@rabbitmq:server
+  state: cmd.run
+  args: ['rabbitmqctl cluster_status']
 
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+  #     // Install galera
+  #     withEnv(['ASK_ON_ERROR=false']){
+  #         retry(2) {
+  #             salt.enforceState(saltMaster, 'I@galera:master', 'galera', true)
+  #         }
+  #     }
+  #     salt.enforceState(saltMaster, 'I@galera:slave', 'galera', true)
 
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+  #     // Check galera status
+  #     salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'mysql.status')
+  #     salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'mysql.status')
 
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+  #     // // Setup mysql client
+  #     // salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client', true)
 
 
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Galera (master)
+  do: enforceState
+  target: I@galera:master
+  state: galera
+  retry: {count: 2, delay: 5}
 
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Galera (slaves)
+  do: enforceState
+  target: I@galera:slave
+  state: galera
 
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show master galera status
+  do: runState
+  target: I@galera:master
+  state: mysql.status
 
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Show slave galera status
+  do: runState
+  target: I@galera:slave
+  state: mysql.status
+
+# - description: Install mysql client
+#   do: enforceState
+#   target: I@mysql:client
+#   state: mysql.client
+
+
+# // Install haproxy
+# salt.enforceState(saltMaster, 'I@haproxy:proxy', 'haproxy', true)
+# salt.runSaltProcessStep(saltMaster, 'I@haproxy:proxy', 'service.status', ['haproxy'])
+# salt.runSaltProcessStep(saltMaster, 'I@haproxy:proxy', 'service.restart', ['rsyslog'])
+
+
+- description: Install HAProxy
+  do: enforceState
+  target: I@haproxy:proxy
+  state: haproxy
+
+- description: Show HAProxy service status
+  do: runState
+  target: I@haproxy:proxy
+  state: service.status
+  args: ['haproxy']
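+  # NOTE: service.status returns True/False per minion; whether a False
+  # result fails the run depends on how the step runner interprets it.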
+
+- description: Restart rsyslog on HAProxy nodes
+  do: runState
+  target: I@haproxy:proxy
+  state: service.restart
+  args: ['rsyslog']
+
+# // Install memcached
+# salt.enforceState(saltMaster, 'I@memcached:server', 'memcached', true)
+- description: Install Memcached
+  do: enforceState
+  target: I@memcached:server
+  state: memcached
diff --git a/tcp_tests/templates/mcp10-lab-dvr-defaults.jinja b/tcp_tests/templates/mcp10-lab-dvr-defaults.jinja
new file mode 100644
index 0000000..48d1f19
--- /dev/null
+++ b/tcp_tests/templates/mcp10-lab-dvr-defaults.jinja
@@ -0,0 +1,2 @@
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'mcp10-lab-dvr') + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/mcp10-lab-ovs-defaults.jinja b/tcp_tests/templates/mcp10-lab-ovs-defaults.jinja
new file mode 100644
index 0000000..7e457e8
--- /dev/null
+++ b/tcp_tests/templates/mcp10-lab-ovs-defaults.jinja
@@ -0,0 +1,2 @@
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'mcp10-ovs-dvr') + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
diff --git a/tcp_tests/templates/openstack/virtual-mcp10-dvr-openstack.yaml b/tcp_tests/templates/openstack/virtual-mcp10-dvr-openstack.yaml
new file mode 100644
index 0000000..a32f144
--- /dev/null
+++ b/tcp_tests/templates/openstack/virtual-mcp10-dvr-openstack.yaml
@@ -0,0 +1,158 @@
+{% from 'virtual-mcp10-dvr.jinja' import HOSTNAME_CFG01 with context %}
+
+# Install OpenStack control services
+
+- description: Install keystone service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:client' state.sls keystone.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check keystone service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; keystone service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install glance on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+     -C 'I@glance:server' state.sls glance -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure glusterfs.client on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glance:server' state.sls glusterfs.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Update fernet tokens for keystone server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' state.sls keystone.server -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check glance image-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install nova on all controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nova:controller' state.sls nova -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check nova service-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install cinder
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@cinder:controller' state.sls cinder -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check cinder list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install neutron service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:server' state.sls neutron -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install neutron on gtw node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@neutron:gateway' state.sls neutron
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Check neutron agent-list
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Install heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@heat:server' state.sls heat -b 1
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check heat service
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: Deploy horizon dashboard
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@horizon:server' state.sls horizon
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Deploy nginx proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@nginx:server' state.sls nginx
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Re-apply(as in doc) formulas for compute node
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Check IP on computes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+    'ip a'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/openstack/virtual-mcp10-ovs-openstack.yaml b/tcp_tests/templates/openstack/virtual-mcp10-ovs-openstack.yaml
index eec4b60..696e9f6 100644
--- a/tcp_tests/templates/openstack/virtual-mcp10-ovs-openstack.yaml
+++ b/tcp_tests/templates/openstack/virtual-mcp10-ovs-openstack.yaml
@@ -1,158 +1,255 @@
-{% from 'virtual-mcp10-ovs.jinja' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install keystone service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; keystone service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+{% from 'mcp10-lab-ovs-defaults.jinja' import DOMAIN_NAME with context %}
+{% from 'mcp10-lab-ovs-defaults.jinja' import HOSTNAME_CFG01 with context %}
 
 
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+################### Install OpenStack control ##########################
 
-- description: Configure glusterfs.client on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // Install horizon dashboard
+# salt.enforceState(saltMaster, 'I@horizon:server', 'horizon', true)
+# salt.enforceState(saltMaster, 'I@nginx:server', 'nginx', true)
 
-- description: Update fernet tokens for keystone server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Horizon
+  do: enforceState
+  target: I@horizon:server
+  state: horizon
 
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install nginx
+  do: enforceState
+  target: I@nginx:server
+  state: nginx
+
+# // setup keystone service
+# //runSaltProcessStep(saltMaster, 'I@keystone:server', 'state.sls', ['keystone.server'], 1)
+# salt.enforceState(saltMaster, 'I@keystone:server and *01*', 'keystone.server', true)
+# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
+# // populate keystone services/tenants/roles/users
+
+- description: Install Keystone on 01
+  do: enforceState
+  target: I@keystone:server and *01*
+  state: keystone.server
+
+- description: Install Keystone
+  do: enforceState
+  target: I@keystone:server
+  state: keystone.server
+
+# // keystone:client must be called locally
+# //salt.runSaltProcessStep(saltMaster, 'I@keystone:client', 'cmd.run', ['salt-call state.sls keystone.client'], null, true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'service.restart', ['apache2'])
+# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
+# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; keystone service-list'], null, true)
+
+# - description: Install Keystone client
+#   do: runState
+#   target: I@keystone:client
+#   state: cmd.run
+#   args: ['salt-call state.sls keystone.client']
+
+- description: Restart apache on Keystone servers
+  do: runState
+  target: I@keystone:server
+  state: service.restart
+  args: ['apache2']
+
+- description: Install Keystone Client
+  do: enforceState
+  target: I@keystone:client
+  state: keystone.client
+
+- description: Install Keystone Client
+  do: enforceState
+  target: I@keystone:client
+  state: keystone.client
+
+- description: Show Keystone service list
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; keystone service-list']
 
 
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // Install glance and ensure glusterfs clusters
+# //runSaltProcessStep(saltMaster, 'I@glance:server', 'state.sls', ['glance.server'], 1)
+# salt.enforceState(saltMaster, 'I@glance:server and *01*', 'glance.server', true)
+# salt.enforceState(saltMaster, 'I@glance:server', 'glance.server', true)
+# salt.enforceState(saltMaster, 'I@glance:server', 'glusterfs.client', true)
 
 
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install glance on 01
+  do: enforceState
+  target: I@glance:server and *01*
+  state: glance.server
 
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install glance
+  do: enforceState
+  target: I@glance:server
+  state: glance.server
+
+- description: Install gluster client on glance servers
+  do: enforceState
+  target: I@glance:server
+  state: glusterfs.client
+
+# // Update fernet tokens before doing request on keystone server
+# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
+
+- description: Update fernet tokens
+  do: enforceState
+  target: I@keystone:server
+  state: keystone.server
+
+# // Check glance service
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; glance image-list'], null, true)
+
+- description: Show glance images via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; glance image-list']
+
+# // Install and check nova service
+# //runSaltProcessStep(saltMaster, 'I@nova:controller', 'state.sls', ['nova'], 1)
+# salt.enforceState(saltMaster, 'I@nova:controller and *01*', 'nova.controller', true)
+# salt.enforceState(saltMaster, 'I@nova:controller', 'nova.controller', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova service-list'], null, true)
+
+- description: Install nova on controllers on 01
+  do: enforceState
+  target: I@nova:controller and *01*
+  state: nova.controller
+
+- description: Install nova on controllers
+  do: enforceState
+  target: I@nova:controller
+  state: nova.controller
+
+- description: Show nova services via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; nova service-list']
 
 
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
 
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // Install and check cinder service
+# //runSaltProcessStep(saltMaster, 'I@cinder:controller', 'state.sls', ['cinder'], 1)
+# salt.enforceState(saltMaster, 'I@cinder:controller and *01*', 'cinder', true)
+# salt.enforceState(saltMaster, 'I@cinder:controller', 'cinder', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; cinder list'], null, true)
 
 
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install cinder on controllers on 01
+  do: enforceState
+  target: I@cinder:controller and *01*
+  state: cinder
+
+- description: Install cinder on controllers
+  do: enforceState
+  target: I@cinder:controller
+  state: cinder
+
+- description: Show cinder list via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; cinder list']
 
 
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // Install neutron service
+# //runSaltProcessStep(saltMaster, 'I@neutron:server', 'state.sls', ['neutron'], 1)
 
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# salt.enforceState(saltMaster, 'I@neutron:server and *01*', 'neutron.server', true)
+# salt.enforceState(saltMaster, 'I@neutron:server', 'neutron.server', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron agent-list'], null, true)
+
+- description: Install neutron on controllers on 01
+  do: enforceState
+  target: I@neutron:server and *01*
+  state: neutron.server
+
+- description: Install neutron on controllers
+  do: enforceState
+  target: I@neutron:server
+  state: neutron.server
+
+- description: Show neutron agent list via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; neutron agent-list']
+
+# // Install heat service
+# //runSaltProcessStep(saltMaster, 'I@heat:server', 'state.sls', ['heat'], 1)
+# salt.enforceState(saltMaster, 'I@heat:server and *01*', 'heat', true)
+# salt.enforceState(saltMaster, 'I@heat:server', 'heat', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; heat resource-type-list'], null, true)
+
+- description: Install heat on controllers on 01
+  do: enforceState
+  target: I@heat:server and *01*
+  state: heat
+
+- description: Install heat on controllers
+  do: enforceState
+  target: I@heat:server
+  state: heat
+
+- description: Show heat resource type list via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; heat resource-type-list']
+
+# // Restart nova api
+# salt.runSaltProcessStep(saltMaster, 'I@nova:controller', 'service.restart', ['nova-api'])
+
+- description: Restart nova-api
+  do: runState
+  target: I@nova:controller
+  state: service.restart
+  args: ['nova-api']
+
+################### Install OpenStack network ##########################
+
+# // Apply gateway
+# salt.runSaltProcessStep(saltMaster, 'I@neutron:gateway', 'state.apply', [], null, true)
+
+- description: Apply gateway
+  do: runState
+  target: I@neutron:gateway
+  state: state.apply
+
+# // Print information
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'], null, true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'], null, true)
+
+- description: Show neutron networks via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; neutron net-list']
+
+- description: Show nova networks via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; nova net-list']
 
 
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+################### Install OpenStack compute ##########################
 
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
+#  //orchestrate.installOpenstackMkCompute(saltMaster, physical)
+#  // Configure compute nodes
+#  retry(2) {
+#      salt.runSaltProcessStep(saltMaster, 'I@nova:compute', 'state.apply', [], null, true)
+#  }
+
+- description: Install Nova compute
+  do: runState
+  target: I@nova:compute
+  state: state.apply
+  retry: {count: 2, delay: 5}
 
 
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/openstack/virtual-mcp11-ovs-openstack.yaml b/tcp_tests/templates/openstack/virtual-mcp11-ovs-openstack.yaml
index c8c3a3f..214d1af 100644
--- a/tcp_tests/templates/openstack/virtual-mcp11-ovs-openstack.yaml
+++ b/tcp_tests/templates/openstack/virtual-mcp11-ovs-openstack.yaml
@@ -1,169 +1,259 @@
-{% from 'underlay/virtual-mcp11-ovs.yaml' import HOSTNAME_CFG01 with context %}
 
-# Install OpenStack control services
+################### Install OpenStack control ##########################
 
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // Install horizon dashboard
+# salt.enforceState(saltMaster, 'I@horizon:server', 'horizon', true)
+# salt.enforceState(saltMaster, 'I@nginx:server', 'nginx', true)
 
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
+- description: Install Horizon
+  do: enforceState
+  target: I@horizon:server
+  state: horizon
 
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
+- description: Update certs on nginx servers
+  do: enforceState
+  target: I@nginx:server
+  state: salt.minion.cert
 
-- description: Check apache status to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
+- description: Install nginx
+  do: enforceState
+  target: I@nginx:server
+  state: nginx
 
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // setup keystone service
+# //runSaltProcessStep(saltMaster, 'I@keystone:server', 'state.sls', ['keystone.server'], 1)
+# salt.enforceState(saltMaster, 'I@keystone:server and *01*', 'keystone.server', true)
+# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
+# // populate keystone services/tenants/roles/users
 
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Keystone on 01
+  do: enforceState
+  target: I@keystone:server and *01*
+  state: keystone.server
+  retry: {count: 3, delay: 5}
 
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+- description: Install Keystone
+  do: enforceState
+  target: I@keystone:server
+  state: keystone.server
+  retry: {count: 3, delay: 5}
 
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# // keystone:client must be called locally
+# //salt.runSaltProcessStep(saltMaster, 'I@keystone:client', 'cmd.run', ['salt-call state.sls keystone.client'], null, true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'service.restart', ['apache2'])
+# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
+# salt.enforceState(saltMaster, 'I@keystone:client', 'keystone.client', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; keystone service-list'], null, true)
 
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
+# - description: Install Keystone client
+#   do: runState
+#   target: I@keystone:client
+#   state: cmd.run
+#   args: ['salt-call state.sls keystone.client']
+
+- description: Restart apache on Keystone servers
+  do: runState
+  target: I@keystone:server
+  state: service.restart
+  args: ['apache2']
+
+- description: Install Keystone Client
+  do: enforceState
+  target: I@keystone:client
+  state: keystone.client
+
+- description: Install Keystone Client
+  do: enforceState
+  target: I@keystone:client
+  state: keystone.client
+
+- description: Show Keystone service list
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; keystone service-list']
 
 
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
+# // Install glance and ensure glusterfs clusters
+# //runSaltProcessStep(saltMaster, 'I@glance:server', 'state.sls', ['glance.server'], 1)
+# salt.enforceState(saltMaster, 'I@glance:server and *01*', 'glance.server', true)
+# salt.enforceState(saltMaster, 'I@glance:server', 'glance.server', true)
+# salt.enforceState(saltMaster, 'I@glance:server', 'glusterfs.client', true)
+
+
+- description: Install glance on 01
+  do: enforceState
+  target: I@glance:server and *01*
+  state: glance.server
+
+- description: Install glance
+  do: enforceState
+  target: I@glance:server
+  state: glance.server
+
+- description: Install gluster client on glance servers
+  do: enforceState
+  target: I@glance:server
+  state: glusterfs.client
+
+# // Update fernet tokens before doing request on keystone server
+# salt.enforceState(saltMaster, 'I@keystone:server', 'keystone.server', true)
+
+- description: Update fernet tokens
+  do: enforceState
+  target: I@keystone:server
+  state: keystone.server
+
+# // Check glance service
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; glance image-list'], null, true)
+
+- description: Show glance images via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; glance image-list']
+
+# // Install and check nova service
+# //runSaltProcessStep(saltMaster, 'I@nova:controller', 'state.sls', ['nova'], 1)
+# salt.enforceState(saltMaster, 'I@nova:controller and *01*', 'nova.controller', true)
+# salt.enforceState(saltMaster, 'I@nova:controller', 'nova.controller', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova service-list'], null, true)
+
+- description: Install nova on controllers on 01
+  do: enforceState
+  target: I@nova:controller and *01*
+  state: nova.controller
+
+- description: Install nova on controllers
+  do: enforceState
+  target: I@nova:controller
+  state: nova.controller
+
+- description: Show nova services via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; nova service-list']
+
+
+
+# // Install and check cinder service
+# //runSaltProcessStep(saltMaster, 'I@cinder:controller', 'state.sls', ['cinder'], 1)
+# salt.enforceState(saltMaster, 'I@cinder:controller and *01*', 'cinder', true)
+# salt.enforceState(saltMaster, 'I@cinder:controller', 'cinder', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; cinder list'], null, true)
+
+
+- description: Install cinder on controllers on 01
+  do: enforceState
+  target: I@cinder:controller and *01*
+  state: cinder
+
+- description: Install cinder on controllers
+  do: enforceState
+  target: I@cinder:controller
+  state: cinder
+
+- description: Show cinder list via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; cinder list']
+
+
+# // Install neutron service
+# //runSaltProcessStep(saltMaster, 'I@neutron:server', 'state.sls', ['neutron'], 1)
+
+# salt.enforceState(saltMaster, 'I@neutron:server and *01*', 'neutron.server', true)
+# salt.enforceState(saltMaster, 'I@neutron:server', 'neutron.server', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron agent-list'], null, true)
+
+- description: Install neutron on controllers on 01
+  do: enforceState
+  target: I@neutron:server and *01*
+  state: neutron.server
+
+- description: Install neutron on controllers
+  do: enforceState
+  target: I@neutron:server
+  state: neutron.server
+
+- description: Show neutron agent list via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; neutron agent-list']
+
+# // Install heat service
+# //runSaltProcessStep(saltMaster, 'I@heat:server', 'state.sls', ['heat'], 1)
+# salt.enforceState(saltMaster, 'I@heat:server and *01*', 'heat', true)
+# salt.enforceState(saltMaster, 'I@heat:server', 'heat', true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; heat resource-type-list'], null, true)
+
+- description: Install heat on controllers on 01
+  do: enforceState
+  target: I@heat:server and *01*
+  state: heat
+
+- description: Install heat on controllers
+  do: enforceState
+  target: I@heat:server
+  state: heat
+
+- description: Show heat resource type list via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; heat resource-type-list']
+
+# // Restart nova api
+# salt.runSaltProcessStep(saltMaster, 'I@nova:controller', 'service.restart', ['nova-api'])
+
+- description: Restart nova-api
+  do: runState
+  target: I@nova:controller
+  state: service.restart
+  args: ['nova-api']
+
+################### Install OpenStack network ##########################
+
+# // Apply gateway
+# salt.runSaltProcessStep(saltMaster, 'I@neutron:gateway', 'state.apply', [], null, true)
+
+- description: Apply gateway
+  do: runState
+  target: I@neutron:gateway
+  state: state.apply
+
+# // Print information
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'], null, true)
+# salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'], null, true)
+
+- description: Show neutron networks via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; neutron net-list']
+
+- description: Show nova networks via keystone node
+  do: runState
+  target: I@keystone:server
+  state: cmd.run
+  args: ['. /root/keystonerc; nova net-list']
+
+
+################### Install OpenStack compute ##########################
+
+#  //orchestrate.installOpenstackMkCompute(saltMaster, physical)
+#  // Configure compute nodes
+#  retry(2) {
+#      salt.runSaltProcessStep(saltMaster, 'I@nova:compute', 'state.apply', [], null, true)
+#  }
+
+- description: Install Nova compute
+  do: runState
+  target: I@nova:compute
+  state: state.apply
   retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
 
 
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; heat resource-type-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Deploy nginx proxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/salt/virtual-mcp10-dvr-salt.yaml b/tcp_tests/templates/salt/virtual-mcp10-dvr-salt.yaml
new file mode 100644
index 0000000..11f04c7
--- /dev/null
+++ b/tcp_tests/templates/salt/virtual-mcp10-dvr-salt.yaml
@@ -0,0 +1,319 @@
+{% from 'virtual-mcp10-dvr.jinja' import HOSTNAME_CFG01 with context %}
+
+# Install salt to the config node
+
+
+- description: Configure repository on the cfg01 node
+  cmd:
+    echo "172.18.248.114    jenkins.mcp.mirantis.net  gerrit.mcp.mirantis.net" >> /etc/hosts;
+    echo "185.135.196.10    apt-mk.mirantis.com" >> /etc/hosts;
+    echo "nameserver 172.18.208.44 >> /etc/resolv.conf;
+    echo "nameserver 8.8.8.8 >> /etc/resolv.conf;
+    which wget >/dev/null || (apt-get update; apt-get install -y wget);
+    echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+    wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -;
+    echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+    wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Update packages on cfg01
+  cmd: apt-get clean; apt-get update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Installing salt master on cfg01
+  cmd:  apt-get install -y reclass git; apt-get install -y salt-master
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Install common packages on cfg01
+  cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure salt-master on cfg01
+  cmd: |
+    cat << 'EOF' >> /etc/salt/master.d/master.conf
+    file_roots:
+      base:
+      - /usr/share/salt-formulas/env
+    pillar_opts: False
+    open_mode: True
+    reclass: &reclass
+      storage_type: yaml_fs
+      inventory_base_uri: /srv/salt/reclass
+    ext_pillar:
+      - reclass: *reclass
+    master_tops:
+      reclass: *reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
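+
+# NOTE: '&reclass' / '*reclass' in the master config above are ordinary
+# YAML anchor/alias references: the same reclass settings are reused for
+# ext_pillar and master_tops, so reclass serves both pillar data and node
+# classification from /srv/salt/reclass.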
+
+- description: Configure GIT settings and certificates
+  cmd: touch /root/.git_trusted_certs.pem;
+    for server in git.tcpcloud.eu github.com; do
+        openssl s_client -showcerts -connect $server:443 </dev/null
+        | openssl x509 -outform PEM
+        >> /root/.git_trusted_certs.pem;
+    done;
+    HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+    HOME=/root git config --global user.email "tcp-qa@example.com";
+    HOME=/root git config --global user.name "TCP QA";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+
+- description: Clone reclass models with submodules
+  cmd: |
+    ssh-keyscan -H github.com >> ~/.ssh/known_hosts;
+    git clone -b master --recurse-submodules https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab /srv/salt/reclass;
+    mkdir -p /srv/salt/reclass/classes/service;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure reclass
+  cmd: |
+    FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+    FORMULA_REPOSITORY=${FORMULA_REPOSITORY:-deb [arch=amd64] http://apt.tcpcloud.eu/nightly xenial tcp-salt};
+    FORMULA_GPG=${FORMULA_GPG:-http://apt.tcpcloud.eu/public.gpg};
+    which wget > /dev/null || (apt-get update; apt-get install -y wget);
+    echo "${FORMULA_REPOSITORY}" > /etc/apt/sources.list.d/tcpcloud_salt.list;
+    wget -O - "${FORMULA_GPG}" | apt-key add -;
+    apt-get clean; apt-get update;
+    [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+    declare -a formula_services=("linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon");
+    echo -e "\nInstalling all required salt formulas\n";
+    apt-get install -y "${formula_services[@]/#/salt-formula-}";
+    for formula_service in "${formula_services[@]}"; do
+      echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+      [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+    done;
+    [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
+    [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
+    [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+
+    cat << 'EOF' >> /etc/reclass/reclass-config.yml
+    storage_type: yaml_fs
+    pretty_print: True
+    output: yaml
+    inventory_base_uri: /srv/salt/reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure salt-minion on cfg01
+  cmd: |
+    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+    cat << "EOF" >> /etc/salt/minion.d/minion.conf
+    id: {{ HOSTNAME_CFG01 }}
+    master: 127.0.0.1
+    EOF
+    apt-get install -y salt-minion;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure salt adapters on cfg01
+  cmd: |
+    ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
+    chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
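+
+# The symlink above publishes the reclass salt adapter as a 'reclass-salt'
+# command; the 'Show reclass-salt --top' step below depends on it.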
+
+- description: Restart services
+  cmd: |
+     systemctl restart salt-master;
+     systemctl restart salt-minion;
+     echo "Showing system info and metadata ...";
+     salt-call --no-color grains.items;
+     salt-call --no-color pillar.data;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+# Prepare salt services and nodes settings
+- description: Run 'linux' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls linux;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Run 'openssh' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls openssh;
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh restart";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Run 'reclass' formula on cfg01
+  cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls reclass;
+    salt-call --no-color state.sls salt.master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+
+- description: Run 'salt' formula on cfg01
+  cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt.master.service;
+    salt-call --no-color state.sls salt.master,salt.api,salt.minion.ca;
+    systemctl restart salt-minion;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Accept salt keys from all the nodes
+  cmd: salt-key -A -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls reclass
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
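+
+# refresh_pillar re-renders pillar data from the freshly generated reclass
+# inventory; saltutil.sync_all then pushes custom modules, states and
+# grains to every minion so the following state runs can see them.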
+
+- description: Show reclass-salt --top
+  cmd: reclass-salt --top; salt-call --no-color state.sls salt.minion.cert -l info;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+# Bootstrap all nodes
+
+- description: Configure linux on controllers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+    linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure linux on proxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'prx*' state.sls
+    linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure linux on gtw
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'gtw*' state.sls
+    linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 20}
+  skip_fail: false
+
+- description: Configure linux on cmp
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.sls
+    linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 20}
+  skip_fail: false
+
+- description: Configure openssh on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+    cfg*' state.sls openssh;salt --hard-crash --state-output=mixed --state-verbose=False
+    -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh restart"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on ctl
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on prx
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'prx*' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+
+- description: Configure salt.minion on gtw
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'gtw*' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on cmp
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Check salt minion versions on slaves
+  cmd: salt '*' test.version
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check salt top states on nodes
+  cmd: salt '*' state.show_top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure ntp and rsyslog on nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls ntp,rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack gtw node
+  cmd: salt 'gtw*' cmd.run "ip addr del 172.16.10.110/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp01 node
+  cmd: salt 'cmp01*' cmd.run "ip addr del 172.16.10.105/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Hack cmp02 node
+  cmd: salt 'cmp02*' cmd.run "ip addr del 172.16.10.106/24 dev ens4; ip addr flush dev ens4";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
diff --git a/tcp_tests/templates/underlay/virtual-mcp10-dvr--meta-data.yaml b/tcp_tests/templates/underlay/virtual-mcp10-dvr--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp10-dvr--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data-cfg01.yaml b/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data-cfg01.yaml
new file mode 100644
index 0000000..443e50c
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data-cfg01.yaml
@@ -0,0 +1,98 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCp0evjOaK8c8SKYK4r2+0BN7g+8YSvQ2n8nFgOURCyvkJqOHi1qPGZmuN0CclYVdVuZiXbWw3VxRbSW3EH736VzgY1U0JmoTiSamzLHaWsXvEIW8VCi7boli539QJP0ikJiBaNAgZILyCrVPN+A6mfqtacs1KXdZ0zlMq1BPtFciR1JTCRcVs5vP2Wwz5QtY2jMIh3aiwkePjMTQPcfmh1TkOlxYu5IbQyZ3G1ahA0mNKI9a0dtF282av/F6pwB/N1R1nEZ/9VtcN2I1mf1NW/tTHEEcTzXYo1R/8K9vlqAN8QvvGLZtZduGviNVNoNWvoxaXxDt8CPv2B2NCdQFZp
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup ens4
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
+
+   - path: /root/.ssh/id_rsa
+     owner: root:root
+     permissions: '0600'
+     content: |
+         -----BEGIN RSA PRIVATE KEY-----
+         MIIEogIBAAKCAQEAqdHr4zmivHPEimCuK9vtATe4PvGEr0Np/JxYDlEQsr5Cajh4
+         tajxmZrjdAnJWFXVbmYl21sN1cUW0ltxB+9+lc4GNVNCZqE4kmpsyx2lrF7xCFvF
+         Qou26JYud/UCT9IpCYgWjQIGSC8gq1TzfgOpn6rWnLNSl3WdM5TKtQT7RXIkdSUw
+         kXFbObz9lsM+ULWNozCId2osJHj4zE0D3H5odU5DpcWLuSG0MmdxtWoQNJjSiPWt
+         HbRdvNmr/xeqcAfzdUdZxGf/VbXDdiNZn9TVv7UxxBHE812KNUf/Cvb5agDfEL7x
+         i2bWXbhr4jVTaDVr6MWl8Q7fAj79gdjQnUBWaQIDAQABAoIBAFU3kU6yIna9BViH
+         UX+S2ijtRBjZ68JjavEnp4xvo5h+nydcdT57q9lv/0nAi3g3gmXm/oJH+/ZU87HV
+         zy+zP+t+umDSChUkPBZFL5jxpKyN7BhMrP1KzRuEGYd6vJE/nfY5g095P5vDgnpX
+         o+SNg/YqrY1u8zgr/hnfRaV2/XyIDEEcQXTHseWTnnMQnULFU88xL8yq8ACT5GhK
+         7A9m5ukfcU6d/fs/psz5Yqw5IQsWbv1yJ3/FKufPHlo2Nzh3/3eDAZUXvaBgf1so
+         FWFpHtkry3OXOGaZ98HgF9hL0twS0pzMvuypdGUQAt6nyB1N5re4LK/MAOddqwEc
+         1+NQzfECgYEA2ryEf0GLJdtiYs3F4HbwTwJVIXdyWv7kjYGeMkutzzAjXl6wx8aq
+         kfqLJ7x7UkR5unZ1ajEbKBciAlSuFA+Gikn6a4Lv8h87aSnHpPd/2VSitRlI/gW7
+         w4U4CL3Br1JyonU5WA7VYfTow7KnHBhdwm27RMA9uosyIpveQRpqSG0CgYEAxsAS
+         wCQKrhuPq2YtGtFR7K4BL+N+0E1Vq6h49u1ukcgUe0GHVD3VzBypNCv7rWEVHzAg
+         biCVi7PCjzZYW4fYZmzVD4JbFLVGOUu7aJwLaE4wDe72DNr6YZhcS+Ta98BP+x0q
+         Wt34JNPDabRPfhXfhiCqnWjjod+4Zqx4VJVNgG0CgYB5EXL8xJhyAbW5Hk/x56Mm
+         +BGKjoR7HS3/rMiU6hJv5SMObrbGPI3YcqZm/gn8BO6jaEGg30E6tWMbiyc270j2
+         be/vZe/NQcAuevOHuX3IGvJb7nzaLO46UBgtrmnv0mCkzuFIfh1ZNKdI+i9Ie6wZ
+         m4bVjNod0EGVqlQgELDXGQKBgB+NNmzSS++/6FrpaZesSzkrlnynvOYMoOETacCp
+         iLgT70xx5q308w/oLORfZyDrHJNK7JsPCS6YZvadRgGh2zTHajuAEj2DWZaW8zV0
+         MEtqvi44FU+NI9qCeYSC3FAgc5IF20d5nX8bLxaEzWnSxx1f6jX7BMgZ4AhMsP2c
+         hiUxAoGAFaxn+t9blIjqUiuh0smSYFhLBVPZveYHQDmQYERjktptBd3X95fGnSKh
+         iDe2iPGyud2+Yu4X/VjHLh/MRru+ZXvPXw1XwEqX93q8a1n283ul0Rl9+KKKOVHR
+         eecTjI/BfXBf33mPRKny3xuHw6uwta2T3OXky9IhqYS1kkHiZWA=
+         -----END RSA PRIVATE KEY-----
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data1404.yaml b/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data1404.yaml
new file mode 100644
index 0000000..e5454e1
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data1404.yaml
@@ -0,0 +1,136 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Configure dhclient
+   - cloud-init-per once echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - cloud-init-per once resolvconf -u
+   - cloud-init-per once echo "172.18.248.114    jenkins.mcp.mirantis.net  gerrit.mcp.mirantis.net" >> /etc/hosts
+   - cloud-init-per once echo "185.135.196.10    apt-mk.mirantis.com" >> /etc/hosts
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  apt:
+   preserve_sources_list: true
+   sources:
+    mcp_salt.list:
+     source: "deb [arch=amd64] http://apt-mk.mirantis.com/xenial nightly salt extra"
+     keyid: A76882D3
+    saltstack.list:
+     source: "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main"
+     keyid: DE57BFBE
+    tcpcloud_salt.list:
+     source: "deb [arch=amd64] http://apt.tcpcloud.eu/nightly xenial tcp-salt"
+     keyid: A76882D3
+
+  packages:
+   - wget
+   - git
+   - salt-minion
+   - python-pip
+   - curl
+   - tmux
+   - byobu
+   - iputils-ping
+   - traceroute
+   - htop
+   - tree
+   - screen
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup eth0
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup eth1
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   # - echo "Preparing base OS"
+   # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+   # - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
+   # - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+   # The saltstack repo ships the same minion version (2016.3.3) for both xenial and trusty
+   #- echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
+   #- wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+   # - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3 trusty main" > /etc/apt/sources.list.d/saltstack.list
+   # - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+   # - apt-get clean
+   # - apt-get update
+   # - apt-get -y upgrade
+
+   # Install common packages
+   # - apt-get install -y python-pip git
+   # - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+   # - apt-get install -y salt-minion
+
+   # To be configured from inventory/fuel-devops by the operator or by autotests
+   # - 'echo "id: {hostname}" >> /etc/salt/minion'
+   # - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
+
+   - echo "Restarting minion service with workarounds..."
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+   - sleep 5
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+
+   #- echo "Showing node metadata..."
+   #- salt-call pillar.data
+
+   #- echo "Running complete state ..."
+   #- salt-call state.sls linux,openssh,salt
+
+   # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
+   - apt-get -y install --install-recommends linux-generic-lts-xenial
+   - reboot
+
+   ########################################################
+   # Node is ready, allow SSH access
+   ##- echo "Allow SSH access ..."
+   ##- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto eth0
+          iface eth0 inet dhcp
+          auto eth1
+          iface eth1 inet dhcp
+
+   - path: /etc/salt/minion.d/minion.conf
+     content: |
+         id: {hostname}
+         master: 192.168.10.100
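Note: the {gateway}, {hostname} and {interface_name} markers in the template
above are plain placeholders, not YAML; the leading '|' makes the whole file a
single literal block scalar. A minimal sketch of sanity-checking a rendered
user-data file, assuming simple str.format-style substitution (the helper
name and parameter values are illustrative, not the framework's actual API):

    import yaml

    def check_user_data(path, **params):
        with open(path) as f:
            blob = yaml.safe_load(f)      # -> str: the '|' block scalar
        rendered = blob.format(**params)  # fill {gateway}, {hostname}, ...
        return yaml.safe_load(rendered)   # raises if the cloud-config is broken

    conf = check_user_data('virtual-mcp10-dvr--user-data1404.yaml',
                           gateway='172.16.10.1', hostname='ctl01',
                           interface_name='eth0')
    assert 'runcmd' in conf and 'write_files' in conf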
diff --git a/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data1604.yaml b/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data1604.yaml
new file mode 100644
index 0000000..06bcb7f
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp10-dvr--user-data1604.yaml
@@ -0,0 +1,137 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Configure dhclient
+   - cloud-init-per once echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - cloud-init-per once resolvconf -u
+   - cloud-init-per once echo "172.18.248.114    jenkins.mcp.mirantis.net  gerrit.mcp.mirantis.net" >> /etc/hosts
+   - cloud-init-per once echo "185.135.196.10    apt-mk.mirantis.com" >> /etc/hosts
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  apt:
+   preserve_sources_list: true
+   sources:
+    mcp_salt.list:
+     source: "deb [arch=amd64] http://apt-mk.mirantis.com/xenial nightly salt extra"
+     keyid: A76882D3
+    saltstack.list:
+     source: "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main"
+     keyid: DE57BFBE
+    tcpcloud_salt.list:
+     source: "deb [arch=amd64] http://apt.tcpcloud.eu/nightly xenial tcp-salt"
+     keyid: A76882D3
+
+  packages:
+   - wget
+   - git
+   - salt-minion
+   - python-pip
+   - curl
+   - tmux
+   - byobu
+   - iputils-ping
+   - traceroute
+   - htop
+   - tree
+   - screen
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   # - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   # - sudo resolvconf -u
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+   - sudo ifup ens4
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   # - echo "Preparing base OS"
+   # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+   # - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt" > /etc/apt/sources.list
+   # - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+   # The saltstack repo ships the same minion version (2016.3.3) for both xenial and trusty
+   #- echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+   #- wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+   # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
+   # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+   # - apt-get clean
+   # - apt-get update
+   # - apt-get -y upgrade
+
+   # Install common packages
+   # - apt-get install -y python-pip git
+   # - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+   # - apt-get install -y salt-minion
+
+   # To be configured from inventory/fuel-devops by the operator or by autotests
+   # - 'echo "id: {hostname}" >> /etc/salt/minion'
+   # - 'echo "master: 192.168.10.100" >> /etc/salt/minion'
+
+   - echo "Restarting minion service with workarounds..."
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+   - sleep 5
+   - rm -f /etc/salt/pki/minion/minion_master.pub
+   - service salt-minion restart
+
+   #- echo "Showing node metadata..."
+   #- salt-call pillar.data
+
+   #- echo "Running complete state ..."
+   #- salt-call state.sls linux,openssh,salt
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+          auto ens4
+          iface ens4 inet dhcp
+
+   - path: /etc/salt/minion.d/minion.conf
+     content: |
+         id: {hostname}
+         master: 192.168.10.100
+
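Note: both user-data variants DROP TCP/22 in bootcmd and only re-open it at
the end of runcmd (the 14.04 variant leaves the unblock commented out,
apparently relying on the reboot into the lts-xenial kernel to clear the
rule). Callers should therefore poll the port rather than connect right away;
a minimal sketch, with host and timeouts purely illustrative:

    import socket
    import time

    def wait_ssh(host, port=22, timeout=600, interval=5):
        # Poll until the iptables DROP rule is removed and sshd answers.
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                with socket.create_connection((host, port), timeout=interval):
                    return
            except OSError:
                time.sleep(interval)
        raise TimeoutError('SSH on %s:%s did not open within %ss'
                           % (host, port, timeout))

    wait_ssh('172.16.10.101')  # e.g. default_ctl01 from admin-pool01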
diff --git a/tcp_tests/templates/underlay/virtual-mcp10-dvr.yaml b/tcp_tests/templates/underlay/virtual-mcp10-dvr.yaml
new file mode 100644
index 0000000..e930fb0
--- /dev/null
+++ b/tcp_tests/templates/underlay/virtual-mcp10-dvr.yaml
@@ -0,0 +1,411 @@
+---
+aliases:
+  default_interface_model:
+    - &interface_model !os_env INTERFACE_MODEL, virtio
+
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-dvr') + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'virtual-mcp10-dvr') }}
+
+    address_pools:
+      admin-pool01:
+        net: 172.16.10.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      private-pool01:
+        net: 192.168.10.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: 10.1.0.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: 10.16.0.0/24:24
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +100
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_CMP01 }}: +105
+            default_{{ HOSTNAME_CMP02 }}: +106
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: true
+
+          external:
+            address_pool: external-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+
+        group_volumes:
+         - name: cloudimage1404    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1404  # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
+                                                 # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
+           format: qcow2
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+                                                 # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data-cfg01.yaml
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: admin
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - private
+                ens4:
+                  networks:
+                    - admin
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1404.yaml
+
+              interfaces: &interfaces
+                - label: eth0
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: eth1
+                  l2_network_device: admin
+                  interface_model: *interface_model
+              network_config: &network_config
+                eth0:
+                  networks:
+                    - private
+                eth1:
+                  networks:
+                    - admin
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: eth0
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1404
+                  format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1404.yaml
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CMP01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1604.yaml
+
+              interfaces: &all_interfaces
+                - label: ens3
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config: &all_network_config
+                ens3:
+                  networks:
+                    - private
+                ens4:
+                  networks:
+                    - admin
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_CMP02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1604.yaml
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # Volume with name 'iso' will be used
+                             # to store the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: !include virtual-mcp10-dvr--meta-data.yaml
+                  cloudinit_user_data: !include virtual-mcp10-dvr--user-data1604.yaml
+
+              interfaces: *all_interfaces
+              network_config: *all_network_config
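Note: the address pools above use fuel-devops relative notation, where
offsets like 'gateway: +1' and 'dhcp: [+90, -10]' are presumably resolved
against each pool's network (positive from the start, negative from the end).
A minimal sketch of that arithmetic (not the library's actual API):

    import ipaddress

    net = ipaddress.ip_network(u'172.16.10.0/24')  # admin-pool01

    def at(offset):
        # +N counts from the network address, -N from the broadcast address.
        base = net.network_address if offset >= 0 else net.broadcast_address
        return ipaddress.ip_address(int(base) + offset)

    assert str(at(+1)) == '172.16.10.1'      # gateway / l2_network_device
    assert str(at(+100)) == '172.16.10.100'  # default_cfg01
    print(at(+90), '-', at(-10))             # dhcp range: .90 - .245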
diff --git a/tcp_tests/templates/virtual-mcp10-dvr.jinja b/tcp_tests/templates/virtual-mcp10-dvr.jinja
new file mode 100644
index 0000000..138534c
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp10-dvr.jinja
@@ -0,0 +1,2 @@
+{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-dvr') + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
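Note: os_env in the jinja snippets above reads an environment variable with an
optional default, which is how LAB_CONFIG_NAME and the HOSTNAME_* overrides
flow into the templates. A minimal sketch of such a helper wired into Jinja2
(the wiring is illustrative; the actual registration in tcp_tests may differ):

    import os
    import jinja2

    def os_env(var_name, default=None):
        # Mirror of the template helper: env var with a fallback default.
        value = os.environ.get(var_name, default)
        if value is None:
            raise jinja2.UndefinedError('%s is not set' % var_name)
        return value

    env = jinja2.Environment()
    env.globals['os_env'] = os_env
    tmpl = env.from_string(
        "{% set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp10-dvr')"
        " + '.local' %}cfg01.{{ DOMAIN_NAME }}")
    print(tmpl.render())  # -> cfg01.virtual-mcp10-dvr.local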